|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:02:46.078646Z" |
|
}, |
|
"title": "Fewer Errors, but More Stereotypes? The Effect of Model Size on Gender Bias", |
|
"authors": [ |
|
{ |
|
"first": "Yarden", |
|
"middle": [], |
|
"last": "Tal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hebrew University of Jerusalem", |
|
"location": { |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Inbal", |
|
"middle": [], |
|
"last": "Magar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hebrew University of Jerusalem", |
|
"location": { |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Hebrew University of Jerusalem", |
|
"location": { |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The size of pretrained models is increasing, and so is their performance on a variety of NLP tasks. However, as their memorization capacity grows, they might pick up more social biases. In this work, we examine the connection between model size and its gender bias (specifically, occupational gender bias). We measure bias in three masked language model families (RoBERTa, DeBERTa, and T5) in two setups: directly using prompt based method, and using a downstream task (Winogender). We find on the one hand that larger models receive higher bias scores on the former task, but when evaluated on the latter, they make fewer gender errors. To examine these potentially conflicting results, we carefully investigate the behavior of the different models on Winogender. We find that while larger models outperform smaller ones, the probability that their mistakes are caused by gender bias is higher. Moreover, we find that the proportion of stereotypical errors compared to anti-stereotypical ones grows with the model size. Our findings highlight the potential risks that can arise from increasing model size. 1", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The size of pretrained models is increasing, and so is their performance on a variety of NLP tasks. However, as their memorization capacity grows, they might pick up more social biases. In this work, we examine the connection between model size and its gender bias (specifically, occupational gender bias). We measure bias in three masked language model families (RoBERTa, DeBERTa, and T5) in two setups: directly using prompt based method, and using a downstream task (Winogender). We find on the one hand that larger models receive higher bias scores on the former task, but when evaluated on the latter, they make fewer gender errors. To examine these potentially conflicting results, we carefully investigate the behavior of the different models on Winogender. We find that while larger models outperform smaller ones, the probability that their mistakes are caused by gender bias is higher. Moreover, we find that the proportion of stereotypical errors compared to anti-stereotypical ones grows with the model size. Our findings highlight the potential risks that can arise from increasing model size. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The growing size of pretrained language models has led to large improvements on a variety of NLP tasks (Raffel et al., 2020; He et al., 2021; Brown et al., 2020 ). However, the success of these models comes with a price-they are trained on vast amounts of mostly web-based data, which often contains social stereotypes and biases that the models might pick up (Bender et al., 2021; Dodge et al., 2021; De-Arteaga et al., 2019) . Combined with recent evidence that the memorization capacity of training data grows with model size (Magar and Schwartz, 2022; Carlini et al., 2022) , the risk of Figure 1 : We study the effect of model size on occupational gender bias in two setups: using prompt based method (A), and using Winogender as a downstream task (B). We find that while larger models receive higher bias scores on the former task, they make less gender errors on the latter. We further analyse the models' behaviour on Winogender and show that larger models express more biased behavior in those two setups.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 124, |
|
"text": "(Raffel et al., 2020;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 141, |
|
"text": "He et al., 2021;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 160, |
|
"text": "Brown et al., 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 381, |
|
"text": "(Bender et al., 2021;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 401, |
|
"text": "Dodge et al., 2021;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 426, |
|
"text": "De-Arteaga et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 555, |
|
"text": "(Magar and Schwartz, 2022;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 577, |
|
"text": "Carlini et al., 2022)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 592, |
|
"end": 600, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "language models containing these biases is even higher. This can have negative consequences, as models can abuse these biases in downstream tasks or applications. For example, machine translation models have been shown to generate outputs based on gender stereotypes regardless of the context of the sentence (Stanovsky et al., 2019) , and models rated male resumes higher than female ones (Parasurama and Sedoc, 2021).", |
|
"cite_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 333, |
|
"text": "(Stanovsky et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There is an increasing amount of research dedicated to evaluating this problem. For example, several works studied the bias in models using downstream tasks such as coreference resolution Zhao et al., 2018) , natural language inference (NLI) (Poliak et al., 2018; Sharma et al., 2021) and machine translation (Stanovsky et al., 2019) . Other works measured bias in language models directly using masked language modeling (MLM) (Nadeem et al., 2021; Nangia et al., 2020; de Vassimon Manela et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 206, |
|
"text": "Zhao et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 263, |
|
"text": "(Poliak et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 284, |
|
"text": "Sharma et al., 2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 333, |
|
"text": "(Stanovsky et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 448, |
|
"text": "(Nadeem et al., 2021;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 469, |
|
"text": "Nangia et al., 2020;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 502, |
|
"text": "de Vassimon Manela et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we examine how model size af-fects gender bias ( Fig. 1) . We focus on occupationspecific bias which corresponds to the real-world employment statistics (BLS). 2 We measure the bias in three model families (RoBERTa; Liu et al., 2019, DeBERTa; He et al., 2021 and T5; Raffel et al., 2020) in two different ways: using MLM prompts and using the Winogender benchmark . We start by observing a potentially conflicting trend: although larger models exhibit more gender bias than smaller models in MLM, 3 their Winogender parity score, which measures gender consistency, is higher, indicating a lower level of gender errors. To bridge this gap, we further analyze the models' Winogender errors, and present an alternative approach to investigate gender bias in downstream tasks. First, we estimate the probability that an error is caused due to gender bias, and find that within all three families, this probability is higher for the larger models. Then, we distinguish between two types of gender errors-stereotypical and anti-stereotypical-and compare their distribution. We find that stereotypical errors, which are caused by following the stereotype, are more prevalent than anti-stereotypical ones, and that the ratio between them increases with model size. Our results demonstrate a potential risk inherent in model growth-it makes models more socially biased.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 176, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 257, |
|
"text": "Liu et al., 2019, DeBERTa;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 281, |
|
"text": "He et al., 2021 and T5;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 302, |
|
"text": "Raffel et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 71, |
|
"text": "Fig. 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The connection between model size and gender bias is not fully understood; are larger models more sensitive to gender bias, potentially due to their higher capacity that allows them to capture more subtle biases? or perhaps they are less biased, due to their superior language capabilities?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are Larger Models More Biased?", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section we study this question in a controlled manner, and observe a somewhat surprising trend: depending on the setup for measuring gender bias, conflicting results are observed; on the one hand, in MLM setup larger models are more sensitive to gender bias than smaller models. On the other, larger models obtain higher parity score on a downstream task (Winogender), which hints that they might be less sensitive to bias in this task. We describe our findings below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are Larger Models More Biased?", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We measure the occupational gender bias in three models' families, using two methods- .\" As the number of parameters in the model increases the model gets a higher average bias score as well as higher or equal agreement score. prompt based method (Kurita et al., 2019) and Winogender schema . To maintain consistency, we use the same list of occupations in all our experiments. The gender stereotypicality of an occupation is determined by the U.S. Bureau of Labor Statistics (BLS). 4", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 268, |
|
"text": "(Kurita et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are Larger Models More Biased?", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Pretrained Models Unless stated otherwise, we experiment with three families of pretrained language models: RoBERTa-{base,large} (Liu et al., 2019) , DeBERTa-{base,large,xlarge} (He et al., 2021) and T5-{base,large,3B} (Raffel et al., 2020) . We provide implementation details in App. B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 147, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 195, |
|
"text": "(He et al., 2021)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 240, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are Larger Models More Biased?", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To examine the model's sensitivity to gender bias we directly query the model using a simple prompt:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
|
{

"text": "\"[MASK] worked as a/an [OCCUPATION].\" 5 This prompt intentionally does not provide much context, in order to purely measure occupational biases. As a measure of bias, we adopt Kurita et al. (2019)'s log probability bias score. We compare the normalized predictions 6 that the model assigns to \"he\" and \"she\", given the above prompt: for male occupations (according to BLS) we compute the difference with respect to \"he\", and for female occupations we compute the difference with respect to \"she\". Positive scores indicate the model assigns higher normalized predictions to the pronoun that matches the occupation's stereotypical gender. We experiment with RoBERTa and T5, 7 evaluating gender bias using two measures:",
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 27, |
|
"text": "[OCCUPATION]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. agreement: the percentage of occupations with positive bias score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "2. average bias score: the average bias score of the occupations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "agreement enables us to evaluate the general preference towards one gender, while average bias score measures the magnitude of the preference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
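{

"text": "To make this concrete, here is a minimal sketch of the prompt-based measurement (our illustration, not the authors' released code: the two-occupation dictionary is a toy stand-in for the BLS list, RoBERTa-base stands in for the models studied, and T5 would require its span-infilling interface instead). It computes the prior-normalized log-probability comparison of 'he' vs. 'she' for each occupation, then the agreement and average bias score:

import math

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

MODEL_NAME = 'roberta-base'
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForMaskedLM.from_pretrained(MODEL_NAME).eval()

# Toy stand-in for the BLS occupation list, mapping each occupation to its majority gender.
OCCUPATIONS = {'engineer': 'male', 'nurse': 'female'}
PRONOUNS = {'male': ' he', 'female': ' she'}  # the leading space matters for RoBERTa's BPE

def pronoun_probs(prompt):
    # Return P(' he') and P(' she') at the first [MASK] position of the prompt.
    text = prompt.replace('[MASK]', tokenizer.mask_token)
    inputs = tokenizer(text, return_tensors='pt')
    mask_pos = (inputs['input_ids'][0] == tokenizer.mask_token_id).nonzero()[0].item()
    with torch.no_grad():
        probs = model(**inputs).logits[0, mask_pos].softmax(-1)
    ids = {g: tokenizer(p, add_special_tokens=False)['input_ids'][0] for g, p in PRONOUNS.items()}
    return {g: probs[i].item() for g, i in ids.items()}

def bias_score(occupation, stereotypical_gender):
    # Kurita-style score: log(P_target / P_prior) per pronoun, with the prior taken from the
    # same prompt with the occupation also masked (footnote 6); the difference is signed so
    # that a positive score means the model prefers the stereotypical pronoun.
    target = pronoun_probs(f'[MASK] worked as a/an {occupation}.')
    prior = pronoun_probs(f'[MASK] worked as a/an {tokenizer.mask_token}.')
    normalized = {g: math.log(target[g] / prior[g]) for g in target}
    other = 'female' if stereotypical_gender == 'male' else 'male'
    return normalized[stereotypical_gender] - normalized[other]

scores = [bias_score(occ, gender) for occ, gender in OCCUPATIONS.items()]
agreement = sum(s > 0 for s in scores) / len(scores)  # share of occupations with a positive score
average_bias = sum(scores) / len(scores)              # magnitude of the preference
print(f'agreement={agreement:.2%}, average bias score={average_bias:.3f}')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sensitivity to Gender Bias in MLM Increases with Model Size",

"sec_num": "2.1"

},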
|
{ |
|
"text": "Results Fig. 2 presents our results. For both model families, the average bias score increases along with the model size. Further, the agreement measure increases with model size for T5 models, and is the same for both RoBERTa models. These findings indicate that models are becoming more biased as they grow in size. This is consistent with prior work (Nadeem et al., 2021; Vig et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 374, |
|
"text": "(Nadeem et al., 2021;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 392, |
|
"text": "Vig et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 14, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity to Gender Bias in MLM Increases with Model Size", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We have so far observed that larger models express higher sensitivity to gender bias in an MLM setup. We now examine gender bias using a downstream task-Winogender-an evaluation dataset designed to measure occupational gender bias in coreference resolution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Larger Models Exhibit Less Bias in Winogender", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The engineer informed the client that she would need more time to complete the project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "sentence type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The engineer informed the client that he would need more time to complete the project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "not gotcha Table 1 : Examples of \"gotcha\" and \"not gotcha\" sentences from Winogender. In both sentences the pronoun refers to the engineer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 18, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Each example in the dataset contains an occupation (one of the occupations on the BLS list), a Per model family, larger models achieve both higher accuracies (Y axis) and parity scores (X axis) than smaller models. secondary (neutral) participant and a pronoun that refers to either of them. See Tab. 1 for examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Winogender consists of \"gotcha\" and \"not gotcha\" sentences. Roughly speaking, \"gotcha\" sentences are the ones in which the stereotype of the occupation might confuse the model into making the wrong prediction. Consider the \"gotcha\" sentence in Tab. 1. The pronoun \"she\" refers to the \"engineer\" which is a more frequent occupation for men than for women. This tendency could cause the model to misinterpret \"she\" as \"the client\". In contrast, in \"not gotcha\" sentences, the correct answer is not in conflict with the occupation distribution (a male engineer in Tab. 1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Winogender instances are arranged in minimal pairs-the only difference between two paired instances is the gender of the pronoun in the premise (Tab. 1). Importantly, the label for both instances is the same.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the casting of Winogender as an NLI task (Poliak et al., 2018) , which is part of the Su-perGLUE benchmark . Performance on Winogender is measured with both NLI accuracy and gender parity score: the percentage of minimal pairs for which the predictions are the same. Low parity score indicates high level of gender errors (errors which occur when a model assigns different predictions to paired instances). These errors demonstrate the presence of gender bias in the model. We use all three families (RoBERTa, DeBERTa, T5), all fine-tuned on MNLI (Williams et al., 2018) and then fine-tuned again with RTE (Dagan et al., 2013).", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 69, |
|
"text": "(Poliak et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 577, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
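{

"text": "For reference, the following minimal sketch (our illustration; the pairing of instances into a small record and its field names are assumptions about how one might store the predictions, not part of the benchmark release) shows how NLI accuracy and the gender parity score are computed from a model's predictions on Winogender minimal pairs:

from dataclasses import dataclass

@dataclass
class PairPrediction:
    # One Winogender minimal pair cast as NLI: same gold label, only the pronoun's gender differs.
    gold: str          # shared gold label, e.g. 'entailment' or 'not_entailment'
    pred_female: str   # prediction on the instance with the female pronoun
    pred_male: str     # prediction on the instance with the male pronoun

def accuracy(pairs):
    # Fraction of individual instances (two per pair) predicted correctly.
    correct = sum((p.pred_female == p.gold) + (p.pred_male == p.gold) for p in pairs)
    return correct / (2 * len(pairs))

def parity_score(pairs):
    # Fraction of minimal pairs whose two predictions agree. A disagreement is a gender
    # error: flipping the gender of the pronoun flipped the prediction.
    return sum(p.pred_female == p.pred_male for p in pairs) / len(pairs)

# Toy example: one consistent pair and one gender error.
pairs = [
    PairPrediction('entailment', 'entailment', 'entailment'),
    PairPrediction('entailment', 'not_entailment', 'entailment'),
]
print(accuracy(pairs), parity_score(pairs))  # 0.75 0.5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "gotcha",

"sec_num": null

},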
|
{ |
|
"text": "Results Our results are shown in Fig. 3 . We first notice, unsurprisingly, that larger models outperform smaller ones on the NLI task. Further, when considering parity scores, we also find that the scores increase with model size.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 39, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Combined with our results in Sec. 2.1, we observe a potential conflict: while our findings in the MLM experiment show that the larger the model the more sensitive it is to gender bias, when considering our Winogender results, we find that larger models make less gender errors. We next turn to look differently at the Winogender results, in an attempt to bridge this gap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "gotcha", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have so far shown that larger models make fewer gender errors compared to smaller models (Sec. 2.2), but that they also hold more occupational gender bias compared to their smaller counterparts (Sec. 2.1). In this section we argue that parity score and accuracy do not show the whole picture. Through an analysis of the models' gender errors, we offer an additional viewpoint on the Winogender results, which might partially bridge this gap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The probability that an error is gendered increases with model size Our first observation is that while larger models make fewer errors, and fewer gender errors in particular, the proportion of the latter in the former is higher compared to smaller models. We evaluate the probability that an error is caused by the gender of the pronoun (i.e., that an error is gendered). We estimate this probability by the proportion of gender errors in total errors:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "p(error is gendered) \u2248 |gender errors| |errors|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We find for both DeBERTa and RoBERTa that this probability increases with model size (Tab. 2, gender column). In the extreme case (DeBERTaxlarge), 41% of the errors are gendered. Our results indicate that for larger models, the rate in which the total amount of errors drop is higher than the rate of gender errors drop.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Larger models make more stereotypical errors We next distinguish between two types of gender errors: stereotypical and anti-stereotypical. As described in Sec. Table 2 : The probability that an error is gendered (gender column) increases with model size. When breaking down gender errors into stereotypical and antistereotypical errors, we find that the increase in probability originates from more stereotypical errors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 167, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "to \"gotcha\" and \"not gotcha\" instances. The key characterization of a \"gotcha\" sentence is that the occupation's stereotype can make it hard for the model to understand the coreference in the sentence. Thus, we will refer to the gender errors on \"gotcha\" sentences as stereotypical errors. 8 Accordingly, we will refer to gender errors on \"not gotcha\" sentences as anti-stereotypical errors. Note that the number of gender errors is equal to the sum of stereotypical and anti-stereotypical errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
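{

"text": "Continuing the sketch above (again our illustration: the per-pair flag marking which instance of the pair is the \"gotcha\" one is an assumed input), the error breakdown used in this section can be computed as follows:

def error_breakdown(pairs, female_is_gotcha):
    # pairs: list of PairPrediction records from the earlier sketch.
    # female_is_gotcha[i]: True if the female-pronoun instance of pair i is the 'gotcha'
    # one, i.e. its correct reading conflicts with the occupation's stereotype.
    # Returns the estimated p(error is gendered), p(error is stereotypical) and
    # p(error is anti-stereotypical), each as a proportion of all wrong predictions.
    errors = gender_errors = stereotypical = 0
    for p, f_gotcha in zip(pairs, female_is_gotcha):
        wrong_f = p.pred_female != p.gold
        wrong_m = p.pred_male != p.gold
        errors += wrong_f + wrong_m
        if wrong_f != wrong_m:  # the paired predictions disagree: a gender error
            gender_errors += 1
            erred_on_gotcha = wrong_f if f_gotcha else wrong_m
            stereotypical += erred_on_gotcha  # error on the 'gotcha' instance (footnote 8)
    if errors == 0:
        return 0.0, 0.0, 0.0
    anti_stereotypical = gender_errors - stereotypical
    return gender_errors / errors, stereotypical / errors, anti_stereotypical / errors

With real predictions, the three proportions correspond to the gender, stereotypical, and anti-stereotypical columns of Tab. 2 discussed next, and the ratio of the last two is the stereotypical to anti-stereotypical error ratio shown in Fig. 4.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Winogender Errors Analysis Unravels Biased Behavior",

"sec_num": "3"

},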
|
{ |
|
"text": "We present in Tab. 2 both probabilities that an error is stereotypical and anti-stereotypical. Within all three model families, the probability that an error is stereotyped rises with model size, while the probability that an error is anti-stereotyped decreases with model size. This observation indicates that the increase in proportion of gendered errors is more attributed to stereotypical errors in larger models compared to smaller ones. Indeed, when considering the distribution of gender errors (Fig. 4) , we find that the larger models obtain a higher stereotypical to anti-stereotypical error ratio; in some cases, the larger models are making up to 20 times more stereotypical errors than antistereotypical. This indicates that even though they make fewer gender errors, when they do make them, their mistakes tend to be more stereotypical.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 502, |
|
"end": 510, |
|
"text": "(Fig. 4)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our results provide a deeper understanding of the models' behavior on Winogender compared to only considering accuracy and parity score. Combined with our MLM results (Sec. 2.1), we conclude that larger models express more biased behavior than smaller models. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Winogender Errors Analysis Unravels Biased Behavior", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Measuring bias in pretrained language models Earlier works presented evaluation datasets such as WEAT/SEAT, which measure bias in static word embedding using cosine similarity of specific target words (Caliskan et al., 2017; . Another line of work explored evaluation directly in pretrained masked language models. Kurita et al. (2019) presented an association relative metric for measure gender bias. This metric incorporates the probability of predicting an attribute (e.g \"programmer\") given the target for bias (e.g \"she\"), in a generic template such as \"<target> is [MASK]\". They measure how much more the model prefers the male gender association with an attribute. Nadeem et al. (2021) presented StereoSet, a large-scale natural dataset to measure four domains of stereotypical biases in models using likelihood-based scoring with respect to their language modeling ability. Nangia et al. (2020) introduced CrowS-Pairs, a challenge set of minimal pairs that examines stereotypical bias in nine domains via minimal pairs. They adopted psuedolikelihood based scoring (Wang and Cho, 2019; Salazar et al., 2020) that does not penalize less frequent attribute term. In our work, we build upon Kurita et al. (2019) 's measure in order to examine stereotypical bias to the specifics occupations we use, in different sizes of models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 224, |
|
"text": "(Caliskan et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 335, |
|
"text": "Kurita et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 692, |
|
"text": "Nadeem et al. (2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 882, |
|
"end": 902, |
|
"text": "Nangia et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1072, |
|
"end": 1092, |
|
"text": "(Wang and Cho, 2019;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1093, |
|
"end": 1114, |
|
"text": "Salazar et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1195, |
|
"end": 1215, |
|
"text": "Kurita et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Another method to evaluate bias in pretrained models is through downstream tasks, such as coreference resolution Zhao et al., 2018) and sentiment analysis (Kiritchenko and Mohammad, 2018) . Using this method, the bias is determined by the performance of the model in the task. This allows for investigation of how much the bias of the model affects its performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 131, |
|
"text": "Zhao et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 187, |
|
"text": "(Kiritchenko and Mohammad, 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Bias sensitivity of larger pretrained models Most related to this work, Nadeem et al. (2021) measured bias using the StereoSet dataset, and compared models of the same architecture of different sizes. They found that as the model size increases, its stereotypical score increases. For autocomplete generation, Vig et al. (2020) analyzed GPT-2 (Radford et al., 2019) variants through a causal mediation analysis and found that larger models contain more gender bias. In this work we found a similar trend with respect to gender occupational bias measured via MLM prompts, and a somewhat different trend when considering Winogender parity scores. Our error analysis on Winogender was able to partially bridge the gap between these potential conflicting findings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 92, |
|
"text": "Nadeem et al. (2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 327, |
|
"text": "Vig et al. (2020)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 365, |
|
"text": "GPT-2 (Radford et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We investigated how a model's size affects its gender bias. We presented somewhat conflicting results: the model bias increases with model size when measured using a prompt based method, but the amount of gender errors decreases with size when considering the parity score in the Winogender benchmark. To bridge this gap, we employed an alternative approach to investigate bias in Winogender. Our results revealed that while larger models make fewer gender errors, the proportion of these errors among all errors is higher. In addition, as model size increases, the proportion of stereotypical errors increases in comparison to antistereotypical ones. Our work highlights a potential risk of increasing gender bias which is associated with increasing model sizes. We hope to encourage future research to further evaluate and reduce biases in large language models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we examine how model size affects gender bias. We focus on occupations with a gender stereotype, and examine stereotypical associations between male and female gender and professional occupations. We measure bias in two setups: MLM (Kurita et al., 2019; Nadeem et al., 2021) and Winogender , and build on the enclosed works' definition of gender bias. 9 We show how these different setups yield conflicting results regarding gender bias. We aim to bridge this gap by working under a unified framework of stereotypical and anti-stereotypical associations. We find that the models' biases lead them to make errors, and specifically more stereotypical then antistereotypical errors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 268, |
|
"text": "(Kurita et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 289, |
|
"text": "Nadeem et al., 2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 368, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Systems that identify certain occupations with a specific gender perpetuate inappropriate stereotypes about what men and women are capable of. Furthermore, if a model makes wrong predictions because it associates an occupation with a specific gender, this can cause significant harms such as inequality of employment between men and women. In this work, we highlight that those potential risks become even greater as the models' size increase. Finally, we acknowledge that our binary gender labels, which are based on the resources we use, do not reflect the wide range of gender identities. In the future, we hope to extend our work to nonbinary genders as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "with batch size 8. We use AdaFactor (Shazeer and Stern, 2018) optimizer with learning rate of 1e-4 and default parameters: \u03b2 1 = 0.0, \u03f5 = 1e-3, without weight decay. We selected the highest performing models on the validation set among five random trials. All our experiments were conducted using the following GPUs: nvidia RTX 5000, Quadro RTX 6000, A10 and A5000.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 61, |
|
"text": "(Shazeer and Stern, 2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our code is available at https://github.com/ schwartz-lab-NLP/model_size_and_gender_ bias", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.bls.gov/cps/cpsaat11.htm 3 This is consistent with previous findings(Nadeem et al., 2021;Vig et al., 2020).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Based on the resources we use, we assume a binary gender, which we recognize is a simplifying assumption.5 Results on two other prompts show very similar trends (see App. A).6 The probabilities are normalized by the prior probability of the model to predict \"she\" or \"he\" in the same prompt with masked occupation (i.e., \"[MASK] worked as a/an[MASK].\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "At the time of running the experiments, there were problems with running MLM with DeBERTa, which prevented us from experimenting with it (see https://github.com/ microsoft/DeBERTa/issues/74).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Equivalently, a stereotypical error is an error made on a \"gotcha\" instance, when the prediction on the \"not gotcha\" instance pair is correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We followed the huggingface recommendation for T5 fine-tuning settings https://discuss.huggingface. co/t/t5-finetuning-tips/684/3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Elad Shoham, Yuval Reif, Gabriel Stanovsky, Daniel Rotem, and Tomasz Limisiewicz for their feedback and insightful discussions. We also thank the anonymous reviewers for their valuable comments. This work was supported in part by the Israel Science Foundation (grant no. 2045/21) and by a research gift from the Allen Institute for AI.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As pretrained models sensitive to prompts, we experiment with two other prompts: \"[MASK] is a/an [OCCUPATION]\" (Fig. 5) and \"Complete the sentence: [MASK] is a/an [OCCUPATION] .\" (Fig. 6 ). 10 The top predictions of T5-base were irrelevant to the given prompt. In particular, \"she\" and \"he\" were not among the top ten predictions of the model for any of the occupations. Therefore it is not presented. .\" An increasing trend is observed for both families in almost all cases (except agreement score for T5-3B).The last prompt is inspired by the task prefix that was used during T5's pretraining. In all the prompts we use, the models predicted \"she\" and \"he\" in the top ten predictions, for at least 75% of the occupations.The results show in almost all cases (except agreement score for T5-3B in \"[MASK] is a/an [OCCUPATION]\") an increasing trend for both families.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 119, |
|
"text": "[OCCUPATION]\" (Fig. 5)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 175, |
|
"text": "[OCCUPATION]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 192, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 186, |
|
"text": "(Fig. 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Additional Prompts for MLM Setup", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We implemented the experiments with the huggingface package (Wolf et al., 2020) , using both run_glue (for RoBERTa and Deberta) and run_summarization (for T5) scrips for masked language models. We used the official MNLI checkpoints for RoBERTa and Deberta and then finetuned again with RTE with the following standard procedure and hyperparameters. We fine-tuned RoBERTa and DeBERTa on RTE for 6 epochs with batch size 32. We use AdamW optimizer (Loshchilov and Hutter, 2019) with learning rate of 2e-5 (for RoBERTa-{base,large}) and DeBERTa-{base}) and 1e-5 (for DeBERTa-{large,xlarge} and default parameters: \u03b2 1 = 0.9, \u03b2 2 = 0.999, \u03f5 = 1e-6, with weight decay of 0.1.For T5 we used the T5 1.0 checkpoint, which is trained on both unsupervised and downstream task data. We fine-tuned T5 11 on RTE for 6 epochs", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 79, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 475, |
|
"text": "(Loshchilov and Hutter, 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Implementation Details For Sec. 2.2", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "On the dangers of stochastic parrots: Can language models be too big? FAccT '21", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angelina", |
|
"middle": [], |
|
"last": "Mcmillan-Major", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shmargaret", |
|
"middle": [], |
|
"last": "Shmitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "610--623", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3442188.3445922" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender, Timnit Gebru, Angelina McMillan- Major, and Shmargaret Shmitchell. 2021. On the dangers of stochastic parrots: Can language models be too big? FAccT '21, page 610-623, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Mann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Ryder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Subbiah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Dhariwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Shyam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Sastry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Askell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandhini", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariel", |
|
"middle": [], |
|
"last": "Herbert-Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gretchen", |
|
"middle": [], |
|
"last": "Krueger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Henighan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Ramesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ziegler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clemens", |
|
"middle": [], |
|
"last": "Winter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Hesse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Sigler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mateusz", |
|
"middle": [], |
|
"last": "Litwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Advances in Neural Information Processing Systems 33: 9 In MLM setup", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Ad- vances in Neural Information Processing Systems 33: 9 In MLM setup, stereotypes are taken into account, while in Winogender's parity score they are not.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Annual Conference on Neural Information Processing Systems 2020", |
|
"authors": [], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Conference on Neural Information Process- ing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semantics derived automatically from language corpora contain human-like biases", |
|
"authors": [ |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joanna", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bryson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Science", |
|
"volume": "356", |
|
"issue": "6334", |
|
"pages": "183--186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1126/science.aal4230" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aylin Caliskan, Joanna J. Bryson, and Arvind Narayanan. 2017. Semantics derived automatically from language corpora contain human-like biases. Science, 356(6334):183-186.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Quantifying memorization across neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Carlini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Ippolito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Jagielski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Tram\u00e8r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chiyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tram\u00e8r, and Chiyuan Zhang. 2022. Quantifying memorization across neural lan- guage models. ArXiv, abs/2202.07646.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Recognizing textual entailment: Models and applications", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [ |
|
"Massimo" |
|
], |
|
"last": "Sammons", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zanzotto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Synthesis Lectures on Human Language Technologies", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "1--220", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2200/S00509ED1V01Y201305HLT023" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Dan Roth, Mark Sammons, and Fabio Mas- simo Zanzotto. 2013. Recognizing textual entail- ment: Models and applications. Synthesis Lectures on Human Language Technologies, 6:1-220.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bias in bios: A case study of semantic representation bias in a high-stakes setting", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "De-Arteaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Romanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Chayes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Borgs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Chouldechova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.1145/3287560.3287572" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria De-Arteaga, Alexey Romanov, Hanna M. Wal- lach, Jennifer T. Chayes, Christian Borgs, Alexandra Chouldechova, Sahin Cem Geyik, Krishnaram Ken- thapadi, and Adam Tauman Kalai. 2019. Bias in bios: A case study of semantic representation bias in a high-stakes setting. Proceedings of the Conference on Fairness, Accountability, and Transparency.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Stereotype and skew: Quantifying gender bias in pre-trained and fine-tuned language models", |
|
"authors": [ |
|
{ |
|
"first": "Vassimon", |
|
"middle": [], |
|
"last": "Daniel De", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Manela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Errington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Boris", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pasquale", |
|
"middle": [], |
|
"last": "Van Breugel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Minervini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2232--2242", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.eacl-main.190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel de Vassimon Manela, David Errington, Thomas Fisher, Boris van Breugel, and Pasquale Minervini. 2021. Stereotype and skew: Quantifying gender bias in pre-trained and fine-tuned language models. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Lin- guistics: Main Volume, pages 2232-2242, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Documenting large webtext corpora: A case study on the colossal clean crawled corpus", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Dodge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Agnew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Ilharco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Groeneveld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1286--1305", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.emnlp-main.98" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Dodge, Maarten Sap, Ana Marasovi\u0107, William Agnew, Gabriel Ilharco, Dirk Groeneveld, Margaret Mitchell, and Matt Gardner. 2021. Documenting large webtext corpora: A case study on the colos- sal clean crawled corpus. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1286-1305, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deberta: decoding-enhanced bert with disentangled attention", |
|
"authors": [ |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weizhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "9th International Conference on Learning Representations, ICLR 2021, Virtual Event", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. Deberta: decoding-enhanced bert with disentangled attention. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenRe- view.net.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Examining gender and race bias in two hundred sentiment analysis systems", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S18-2005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko and Saif Mohammad. 2018. Ex- amining gender and race bias in two hundred senti- ment analysis systems. In Proceedings of the Sev- enth Joint Conference on Lexical and Computational Semantics, pages 43-53, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Measuring bias in contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Keita", |
|
"middle": [], |
|
"last": "Kurita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nidhi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Pareek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "166--172", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3823" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keita Kurita, Nidhi Vyas, Ayush Pareek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in con- textualized word representations. In Proceedings of the First Workshop on Gender Bias in Natural Lan- guage Processing, pages 166-172, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Decoupled weight decay regularization", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "7th International Conference on Learning Representations, ICLR 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenRe- view.net.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Data contamination: From memorization to exploitation", |
|
"authors": [ |
|
{ |
|
"first": "Inbal", |
|
"middle": [], |
|
"last": "Magar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Inbal Magar and Roy Schwartz. 2022. Data contamina- tion: From memorization to exploitation. In Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "On measuring social biases in sentence encoders", |
|
"authors": [ |
|
{ |
|
"first": "Chandler", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "622--628", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chandler May, Alex Wang, Shikha Bordia, Samuel R. Bowman, and Rachel Rudinger. 2019. On measuring social biases in sentence encoders. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 622-628, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "StereoSet: Measuring stereotypical bias in pretrained language models", |
|
"authors": [ |
|
{ |
|
"first": "Moin", |
|
"middle": [], |
|
"last": "Nadeem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Bethke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "5356--5371", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.416" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moin Nadeem, Anna Bethke, and Siva Reddy. 2021. StereoSet: Measuring stereotypical bias in pretrained language models. In Proceedings of the 59th Annual Meeting of the Association for Computational Lin- guistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 5356-5371, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "CrowS-pairs: A challenge dataset for measuring social biases in masked language models", |
|
"authors": [ |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Vania", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rasika", |
|
"middle": [], |
|
"last": "Bhalerao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1953--1967", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.154" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikita Nangia, Clara Vania, Rasika Bhalerao, and Samuel R. Bowman. 2020. CrowS-pairs: A chal- lenge dataset for measuring social biases in masked language models. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1953-1967, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Gendered language in resumes and its implications for algorithmic bias in hiring", |
|
"authors": [ |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Parasurama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prasanna Parasurama and Jo\u00e3o Sedoc. 2021. Gendered language in resumes and its implications for algorith- mic bias in hiring.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Collecting diverse natural language inference problems for sentence representation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aparajita", |
|
"middle": [], |
|
"last": "Haldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Edward" |
|
], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [ |
|
"Steven" |
|
], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--81", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Poliak, Aparajita Haldar, Rachel Rudinger, J. Ed- ward Hu, Ellie Pavlick, Aaron Steven White, and Benjamin Van Durme. 2018. Collecting diverse nat- ural language inference problems for sentence rep- resentation evaluation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 67-81, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Gender bias in coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "8--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Masked language model scoring", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Salazar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davis", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toan", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2699--2712", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.240" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Salazar, Davis Liang, Toan Q. Nguyen, and Ka- trin Kirchhoff. 2020. Masked language model scor- ing. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2699-2712, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Evaluating gender bias in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Shanya", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manan", |
|
"middle": [], |
|
"last": "Dey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koustuv", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shanya Sharma, Manan Dey, and Koustuv Sinha. 2021. Evaluating gender bias in natural language inference.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Adafactor: Adaptive learning rates with sublinear memory cost", |
|
"authors": [ |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning, ICML 2018", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "4603--4611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noam Shazeer and Mitchell Stern. 2018. Adafactor: Adaptive learning rates with sublinear memory cost. In Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholmsm\u00e4s- san, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pages 4603-4611. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Evaluating gender bias in machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1679--1684", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1164" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabriel Stanovsky, Noah A. Smith, and Luke Zettle- moyer. 2019. Evaluating gender bias in machine translation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 1679-1684, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Investigating gender bias in language models using causal mediation analysis", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Nevo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaron", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shieber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig, Sebastian Gehrmann, Yonatan Belinkov, Sharon Qian, Daniel Nevo, Yaron Singer, and Stu- art M. Shieber. 2020. Investigating gender bias in language models using causal mediation analysis. In Advances in Neural Information Processing Sys- tems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "BERT has a mouth, and it must speak: BERT as a Markov random field language model", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--36", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-2304" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang and Kyunghyun Cho. 2019. BERT has a mouth, and it must speak: BERT as a Markov ran- dom field language model. In Proceedings of the Workshop on Methods for Optimizing and Evaluat- ing Neural Language Generation, pages 30-36, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Superglue: A stickier benchmark for general-purpose language understanding systems", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3261--3275", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Aman- preet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. Superglue: A stickier benchmark for general-purpose language understand- ing systems. In Advances in Neural Information Processing Systems 32: Annual Conference on Neu- ral Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pages 3261-3275.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Figure 5: agreement and bias score measures for RoBERTa and T5 using the following prompt", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Figure 5: agreement and bias score measures for RoBERTa and T5 using the following prompt: \"Com- plete the sentence: [MASK] is a/an [OCCUPA- TION].\" 10 An increasing trend is observed for both families. Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-demos.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Gender bias in coreference resolution: Evaluation and debiasing methods", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "15--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2003" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Or- donez, and Kai-Wei Chang. 2018. Gender bias in coreference resolution: Evaluation and debiasing methods. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 2 (Short Papers), pages 15-20, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "agreement and bias score measures for RoBERTa and T5 using the following prompt: \"[MASK] worked as a/an[OCCUPATION]", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Accuracy and parity scores on Winogender.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "Distribution of gender errors (stereotypical and anti-stereotypical) of different models on Winogender. Within all model families, larger models exhibit a higher stereotypical to anti-stereotypical errors ratio compared to smaller models.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"text": "2, the Winogender pairs are divided", |
|
"content": "<table><tr><td>model</td><td>size</td><td colspan=\"3\">gender stereotypical anti-stereotypical</td></tr><tr><td>DeBERTa RoBERTa T5</td><td colspan=\"2\">base large xlarge 0.41 0.20 0.32 base 0.17 large 0.22 base 0.16 large 0.20 3B 0.17</td><td>0.17 0.29 0.41 0.11 0.21 0.09 0.15 0.16</td><td>0.03 0.03 0.00 0.06 0.01 0.07 0.05 0.01</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |