|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:02:35.172419Z" |
|
}, |
|
"title": "Gender and sentiment, critics and authors: a dataset of Norwegian book reviews", |
|
"authors": [ |
|
{ |
|
"first": "Samia", |
|
"middle": [], |
|
"last": "Touileb", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Language Technology Group", |
|
"institution": "University of Oslo", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lilja", |
|
"middle": [], |
|
"last": "\u00d8vrelid", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Language Technology Group", |
|
"institution": "University of Oslo", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Velldal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Language Technology Group", |
|
"institution": "University of Oslo", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how female and male describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how female and male describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Gender is a widely studied source of bias in textual content (Garimella and Mihalcea, 2016; Schofield and Mehr, 2016; Kiritchenko and Mohammad, 2018) . There has been considerable previous work analyzing gender bias in NLP models and in particular, in input representations such as static and contextualized word embeddings (Kaneko and Bollegala, 2019; Friedman et al., 2019; Bolukbasi et al., 2016; Zhao et al., 2020; Basta et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 91, |
|
"text": "(Garimella and Mihalcea, 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 117, |
|
"text": "Schofield and Mehr, 2016;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 149, |
|
"text": "Kiritchenko and Mohammad, 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 352, |
|
"text": "(Kaneko and Bollegala, 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 375, |
|
"text": "Friedman et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 399, |
|
"text": "Bolukbasi et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 418, |
|
"text": "Zhao et al., 2020;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 438, |
|
"text": "Basta et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Gender-annotated datasets largely focus on the gender of the author of a specific piece of text, such as a blog (Mukherjee and Liu, 2010; Liu and Mihalcea, 2007) or a tweet (Burger et al., 2011) and has given rise to considerable research focused on author gender identification (Mukherjee and Liu, 2010; Rangel and Rosso, 2019) . Datasets which enable the study of response to gender in text, however, are considerably fewer (Voigt et al., 2018) . With a few noteworthy exceptions (Zhao et al., 2020; Sahlgren and Olsson, 2019) , a majority of previous work has focused on gender modeling and the study of gender bias in English.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 137, |
|
"text": "(Mukherjee and Liu, 2010;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 161, |
|
"text": "Liu and Mihalcea, 2007)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 194, |
|
"text": "(Burger et al., 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 304, |
|
"text": "(Mukherjee and Liu, 2010;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 328, |
|
"text": "Rangel and Rosso, 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 446, |
|
"text": "(Voigt et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 501, |
|
"text": "(Zhao et al., 2020;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 528, |
|
"text": "Sahlgren and Olsson, 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Social psychological research on gender bias in language has shown that there are sociocultural stereotypes inherent in the language used to describe females and males (Menegatti and Rubini, 2017) . While the descriptions of females tend to focus on their communal traits, males are described for their agentic traits (Menegatti and Rubini, 2017) . Madera et al. (2009) show that the gender of the writer can also influence how females and males are described. They show that gender stereotypes can discriminate female applicants in an academic setting, due to their recommendation letters which tend to contain more communal-related words, in contrary to letters written for males which focus more on their agentic abilities. Also, males in their recommendation letters, tend to describe the agentic traits of females more often than females do (Madera et al., 2009) . This makes explicit the need to investigate the gender of both sides: the writer, and the person being written about.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 196, |
|
"text": "(Menegatti and Rubini, 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 346, |
|
"text": "(Menegatti and Rubini, 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 369, |
|
"text": "Madera et al. (2009)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 846, |
|
"end": 867, |
|
"text": "(Madera et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper introduces a dataset of Norwegian book reviews with information about the gender of both the (professional) critic and the book author. Each review comes with a rating on a scale of 1-6, which can be used as a supervision signal for overall positive/negative sentiment of the text. As a part of describing the provided dataset, we include an exploratory analysis of the data through a series of empirical experiments on gender-and sentiment classification. The combination of gender information on two sides in addition to ratings allows for investigating several interesting research questions. Mainly, we here seek to address the following two closely related questions, mostly differing with respect to perspective:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 (R1.1) Are there differences in how the works of male and female authors are critiqued, and in particular in terms of positive and negative aspects? Moreover, (R1.2) are there differences in how this is done by male and female critics?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Conversely: (R2.1): How do male and female critics choose to word positive and negative criticism? Moreover, (R2.2) are there differences with respect to how they choose to do this with respect to the works of male and female authors?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As a simplifying assumption, we only consider gender as a binary category (male and female) in this work. We acknowledge the fact that gender as an identity spans a wider spectrum than this, but this simplification was here deemed necessary to enable our annotation of the reviews. It is worth noting that during our manual annotation we did not come across any mention of known (to us) non-binary or transgender authors or critics. However, it is quite possible that some of the gender labels assigned would have been different had we been able to rely on self-identification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Bias statement: This work mainly attempts to shed light on whether there are latent biases inherent in the data directly, focusing on book reviews. As part of this, we investigate whether polarities associated with certain words to some degree are correlated with the gender of either the critic or the author (or both, in combination). One of our motivations is to assess whether the predictions of sentiment classifiers trained on review data -as is commonly the case -may to some degree depend on the gender of either the critics, the creator of the work being reviewed (the author), or both. By extension, and in terms of possible harms, the dataset we present here suffers from representational harms (Blodgett et al., 2020) . The book reviews present in the NoReC corpus, which are written by professional Norwegian critics, seem to contain gender stereotypes when describing the works of female and male authors. The societal lexical asymmetries in how females and males are portrayed is present in the language use. For example, words related to emotions and feelings are used negatively to describe the works of female authors, but positively when the works described are written by males. Also, words related to achievements with regards to literary genre or the process of publishing in general are positively used when describing the work of males, and negatively for the works of females. These observations seem to maintain the existing social hierarchies that tend to focus on emotional traits when describing females, while focusing on competence traits when describing males (Menegatti and Rubini, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 706, |
|
"end": 729, |
|
"text": "(Blodgett et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1592, |
|
"end": 1620, |
|
"text": "(Menegatti and Rubini, 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Much of the previous work on bias in ML models within NLP has focused on identifying biases in word embeddings and how to mitigate them (Maudslay et al., 2019; Kaneko and Bollegala, 2019; Zmigrod et al., 2019; Friedman et al., 2019; Garg et al., 2018; Bolukbasi et al., 2016) , or even make them gender neutral (Zhao et al., 2018b) . However, such efforts have received criticism by Gonen and Goldberg (2019) who argue that the biases have not been removed, but only \"hidden\" and kept at a deeper level in the embedding space. Bias has also been investigated in several other settings, like multilingual embeddings (Zhao et al., 2020) , deep contextual representations (Basta et al., 2019; May et al., 2019) , language models (Qian et al., 2019) , coreference resolution (Cao and Daum\u00e9 III, 2020; Zhao et al., 2018a; Rudinger et al., 2018) , and machine translation (Escud\u00e9 Font and Costa-juss\u00e0, 2019), just to name a few of the more recent efforts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 159, |
|
"text": "(Maudslay et al., 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 187, |
|
"text": "Kaneko and Bollegala, 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 209, |
|
"text": "Zmigrod et al., 2019;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 232, |
|
"text": "Friedman et al., 2019;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 251, |
|
"text": "Garg et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 275, |
|
"text": "Bolukbasi et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 331, |
|
"text": "(Zhao et al., 2018b)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 408, |
|
"text": "Gonen and Goldberg (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 634, |
|
"text": "(Zhao et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 669, |
|
"end": 689, |
|
"text": "(Basta et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 707, |
|
"text": "May et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 726, |
|
"end": 745, |
|
"text": "(Qian et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 771, |
|
"end": 796, |
|
"text": "(Cao and Daum\u00e9 III, 2020;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 797, |
|
"end": 816, |
|
"text": "Zhao et al., 2018a;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 839, |
|
"text": "Rudinger et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another line of work has focused on investigating gender representations in corpora and models, and release gender-neutral corpora (corpora in which either the distribution of genders is balanced, or where gender stereotypes and gendered words are removed). Schofield and Mehr (2016) use film scripts to analyse the linguistic and structure variations in dialogues and how these differ based on gender. Garimella and Mihalcea (2016) investigate gender biases and discrimination in blogposts. They use a metric to compute the salience of word classes combined with semantic and psycholinguistic resources to identify dominant word classes. These latter are used to uncover the underlying differences in the choice of word classes and concept usage between man and women. They show that the gender of a blog author can be identified using gender-based word disambiguation techniques, and that changes in word frequencies and contexts contribute to the differences between genders. Costa-juss\u00e0 et al. (2020) present a tool GeBioToolkit that automatically extracts multilingual parallel sentences using Wikipedia biographies from several languages, which also relies on gender information to create a gender-balanced corpus. They also introduce the multilingual, parallel and gender-balanced corpus GeBioCorpus, a corpus for machine translation applications covering the three languages Catalan, English, and Spanish.", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 283, |
|
"text": "Schofield and Mehr (2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 432, |
|
"text": "Garimella and Mihalcea (2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Several studies have also focused on gender and gender bias in sentiment analysis. Kiritchenko and Mohammad (2018) present the Equity Evaluation Corpus (EEC) that contains a set of manually crafted English sentences, with the sole purpose to mitigate biases towards certain races and genders. They also show that using the EEC corpus helped uncover the existing biases in over two hundred sentiment analysis systems, which seemed to give higher sentiment predictions for sentences associated with one given race or gender.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 114, |
|
"text": "Kiritchenko and Mohammad (2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Hoyle et al. (2019) use a generative latent-variable model to represent collocations of positive and negative adjective and verb choices, given a gendered head noun. Their analyses goes beyond qualitative analysis, and shed light on the differences on how men and women are described differently. They use a corpus of books spanning various genres, and show for example that positive adjectives used to describe women are related to their bodies more often than is the case for men. Bhaskaran and Bhallamudi (2019) analyse the existence of occupational gender stereotypes in sentiment analysis models. They show that all their tested models (BOW+logistic regression, BiLSTM, BERT (Devlin et al., 2019) ) contain occupational gender stereotypes to some extent. They also show that simple models seem to show biases in training data, while contextual models might reflect biases introduced while pretraining. Voigt et al. (2018) present an annotated corpus for the gender of the addressee and the sentiment and relevance of comments. The corpus comprised comments from responses to Facebook and Reddit comments, TED talks, and posts on Fitocracy. This work has similarities to ours, since they look at the responses to gender e.g. how the content can differ based on the gender of the person being addressed. However, in our work we also add the dimension of the gender of the critic, and how it can positively or negatively affect the description of the book authors' gender.", |
|
"cite_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 514, |
|
"text": "Bhaskaran and Bhallamudi (2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 701, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 926, |
|
"text": "Voigt et al. (2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, we do not focus on the differences in gender representations and biases present in existing systems, nor do we try to mitigate them. We rather investigate the differences in gender descriptions in Norwegian book reviews, and if this affects the ratings of the reviews. To this end we introduce a new dataset of rated reviews with meta-information about the gender of both critics and authors of the work under review. We focus on how positive and negative words can be informative for a simple machine learning model, and how these differ between genders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The underlying source data in this study is the Norwegian Review Corpus (NoReC) comprising professional reviews across a wide variety of domains, collected from several of the major Norwegian news sources (Velldal et al., 2018) . Each review is rated with a numerical dice score on a scale from 1 to 6. In the current work, we only deal with the subset of 4,313 book reviews, for which we have extended the meta-information for each review to include manually coded information about gender -both of the critics and book authors. In what follows, we give an overview of our manual and semi-automatic annotation efforts, and provide the resulting corpus statistics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 227, |
|
"text": "(Velldal et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gender-coded book reviews", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use two simple approaches to annotate the genders of critics and authors: (i) a semi-automated approach; use a list of male and female names and match them with the critics, the title, and the excerpt of each review, followed by manual correction, and (ii) a manual approach; examine titles, excerpts, and reviews to manually identify the authors being reviewed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Authors For the identification of the gender of the book authors, we use a list of predefined male and female names 1 and perform a simple string matching against the title and excerpt of each review. We thereafter manually examine the identified authors and their genders. The list of names contains overlaps between genders, and some names can be both male and female names. In our data, we identified 95 critics and 178 authors that were automatically assigned both genders, these were manually adjudicated and corrected. Table 1 presents the total number of correctly identified authors and their genders using our semi-automatic approach, as well as the number of manually annotated names and their respective genders. An extensive manual analysis showed that our naive semi-automated approach correctly identified 1,324 authors and their genders in the review titles, and 367 in the excerpts. However, we had to manually correct 151 and 368 authors and genders respectively. Most of these corrections were due to mentions of book characters in titles and excerpts. For example, most of the reviews of Harry Potter books mention Harry Potter in the title or excerpt and not the author, J.K. Rowling.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 525, |
|
"end": 532, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "3.1" |
|
}, |
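The matching step can be illustrated with a minimal sketch: look up lowercased tokens of a title or excerpt in a gendered first-name list, and route everything uncertain to manual annotation. The toy name list below stands in for the real resource (see footnote 1); the exact code used for the annotation is not shown here.

```python
# Minimal sketch of the semi-automated gender matching: look up the
# lowercased tokens of a title or excerpt in a gendered first-name list.
# The toy list stands in for the real resource (see footnote 1).
import re

NAME_GENDERS = {"kari": "F", "per": "M", "kim": "ambiguous"}

def guess_gender(text, genders=NAME_GENDERS):
    """Return the set of gender labels of all known first names in text."""
    tokens = re.findall(r"\w+", text.lower())
    return {genders[t] for t in tokens if t in genders}

# Reviews with no hit, conflicting hits, or an ambiguous name are routed
# to manual adjudication, as described above.
hits = guess_gender("Ny roman av Kari Nordmann imponerer")
label = hits.pop() if hits in ({"F"}, {"M"}) else "manual"
```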
|
{ |
|
"text": "In addition to the above, we manually annotated 2,103 reviews, either by manually examining the titles and excerpts for names that do not exist in our lists, or by reading the reviews. During this annotation we identified 31 reviews written by children 2 , and 105 reviews reviewing books written by both male and female authors. Furthermore, we were not able to identify who the authors are in 69 reviews. These three categories (written by children, reviewing both male and female, and unknown authors) are not included in our investigations, as our main focus is to investigate the differences in reviews written about the works of male and female authors by professional male and female critics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Critics The names of the critics were already provided by the metadata of the NoReC corpus, and we use the semi-automated approach described above to identify their gender. We also performed a manual check of the whole corpus, and corrected the gender of 39 critics. During this process, some of the critics were identified as redaksjonen 'the editors'. A total of 343 of these were manually corrected after inspecting the online published version of the reviews. Still, there are 23 reviews written by unknown critics labeled as \"redaksjonen\" that could not be identified.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Summarizing statistics In all of the following counts, we disregarded all reviews written by both a male and a female critics, written about both a male and a female author, written by children, and unknown critics. However, this information will be present in the released gender annotations of NoReC 3 . The final dataset comprises reviews written by 199 unique reviewers: 125 male and 74 female critics. These reviews rate the works of 2,317 unique book authors, from which 1,435 were written by males, and 882 by females. Figure 1: Distribution of ratings given by unique male and female critics to works of male and female authors. The first letter (M/F) indicates the gender of the critic and the second letter that of the author; e.g., FM plots ratings by female critics for male authors. The y axis represents normalized percentages of each rating. Table 2 shows the total document counts broken down along the gender of both the critics and the book author. We see that while the majority of reviews written by male critics targets the work of male book authors (73.73%), female critics tend to have a more balanced review distribution, with a small majority in reviewing female authors (51.81%).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 857, |
|
"end": 864, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation process", |
|
"sec_num": "3.1" |
|
}, |
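The normalization behind Figure 1 is a simple within-group percentage. A sketch with pandas, where the column names and the toy rows are hypothetical:

```python
# Sketch of the normalization behind Figure 1: within each critic-author
# gender pair (MM, MF, FM, FF), the share of each rating 1-6. The column
# names and toy rows are hypothetical.
import pandas as pd

reviews = pd.DataFrame({
    "critic_gender": ["M", "M", "F", "F", "F"],
    "author_gender": ["M", "F", "F", "F", "M"],
    "rating":        [5, 4, 2, 5, 6],
})
reviews["pair"] = reviews["critic_gender"] + reviews["author_gender"]

# Normalize rating counts within each pair so that pairs with different
# review volumes are directly comparable.
dist = (reviews.groupby("pair")["rating"]
               .value_counts(normalize=True)
               .mul(100)
               .rename("percent")
               .reset_index())
```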
|
{ |
|
"text": "Another interesting aspect of the dataset is the distribution of ratings given by male and female critics. Figure 1 shows the normalized percentages of each rating, where the first letter (M/F) indicates the gender of the critic and the second letter indicates the gender of the author; e.g., MF corresponds to reviews by male critics of works by female authors. Here we observe a clear difference in the ratings given by female critics to female authors (FF). In general, female critics tend to give works by women lower ratings. Ratings 2, 3, and 4 were given by female to female on 4.28%, 14.09%, and 34.61% of the time. Compared to MM, MF, FM where rating 2 respectively represents 3.48%, 4.33%, and 2.90% of the total ratings. For rating 3 the trend is similar with 11.21% for MM, 13.32% for MF, and 12.72% for FM. Similarly, MM, MF, and FM gave rating 4 to respectively 30.60%, 30.01%, 30.18% of their reviews. On the upper range of the scale the trend is the opposite, with FF giving ratings 5 and 6 to respectively 37.54% and 8.90% of their reviews, compared to 42.67% and 11.67% for MM, 42.21% and 9.63% for MF, and 43.51% and 9.81% for FM.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 115, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Initial data analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Regardless of sentiment, our first experiments were based on the language use in gender classification. We used our corpus for binary gender classification using Logistic Regression and cross validation, from both the authors' and the critics' perspectives. We opted for this simple classifier because it allows us to easily access the most informative features (in our case words) that guide the classification during training. We manually analysed the top 200 most informative words for each gender, and we were able to see that there were differences in the use of language in relation to gender, regardless of sentiment. However, in an effort to see if there are indeed differences in the language with regard to sentiment, we tested our gender classifiers on two different subsets: (i) a positive test set containing reviews with ratings 5 and 6, (ii) a negative test set comprising reviews with ratings 1, 2, and 3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gender Classification", |
|
"sec_num": "4" |
|
}, |
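With a bag-of-words Logistic Regression, the most informative words per class can be read directly off the learned coefficients. A minimal sketch, where the vectorizer settings and the toy data are assumptions rather than the exact setup used here:

```python
# Sketch of reading off the most informative words per gender from a
# bag-of-words Logistic Regression: the largest positive and negative
# coefficients. Vectorizer settings and toy data are assumptions.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

docs = ["en rørende og vakker roman", "en mislykket og kaotisk krim"]
labels = ["F", "M"]  # toy stand-ins for the author (or critic) gender

vec = CountVectorizer()
X = vec.fit_transform(docs)
clf = LogisticRegression(max_iter=1000).fit(X, labels)

vocab = np.array(vec.get_feature_names_out())
coefs = clf.coef_[0]               # binary case: one weight vector
order = np.argsort(coefs)
top_m = vocab[order[-200:][::-1]]  # strongest evidence for class 'M'
top_f = vocab[order[:200]]         # strongest evidence for class 'F'
```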
|
{ |
|
"text": "Our experiments as presented in Table 3 show that gender classification of authors yields higher accuracy for the negative reviews, while gender classification of critics show the opposite effect (higher accuracy for the positive reviews). More interestingly, looking at F1 values of both genders female (F) and male (M) of book authors, it is clear that our model is able to classify reviews about works of male authors in both positive and negative context, with a slightly higher accuracy for the negative test set. For reviews about the works of female authors our model is much better at identifying the gender in the negative test set. Conversely, in the classification of critics' genders, our model is better at classifying both female and male critics in the positive test set. Moreover, and as can be seen in table 3, our models perform better than the majority class baseline (classify all reviews as M) for both authors and critics.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 39, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Gender Classification", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Based on the observations presented in Section 4, our main interest in the current work is therefore to investigate if there are any differences in how the literary works of female and male authors are described in positive or negative reviews by female and male critics. In particular, we want to investigate whether the gender of the author or the critic affects the language use of the review and its rating. In order to do so, we train a number of models on differing critic-author combinations to predict the rating of a review, e.g. male critics reviewing female authors, male critics reviewing male authors, etc. We then go on to analyze the most informative words of these models using clustering over their word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For transparency and ease of interpretability, we first make use of machine learning models based on traditional approaches (i.e. with discrete and count-based features) that allows for straightforward extraction of the most informative features. We thereafter use word embeddings to cluster these features and identify representations of the content. To this end, we focus solely on the use of content words, i.e. adjectives, nouns, and verbs. These are already available in NoReC which is annotated with PoS tags using UDPipe (Velldal et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 528, |
|
"end": 550, |
|
"text": "(Velldal et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To train our models, we create two subsets of our gender-annotated dataset: (i) a subset containing reviews reviewing female authors (R F ), and (ii) a subset of reviews reviewing male authors (R M ). Instead of looking at the full range of ratings as given in NoReC, we focus on the lowest and highest values of the rating range. The reason for this is that we want to analyze cases of clear positive or negative sentiments. We select all reviews with rating 1, 2, 3, and 6, and randomly select reviews with rating 5 to balance the distribution between the lower and higher ranges. Ratings 1, 2, and 3 represent negative reviews, while 5 and 6 are positive. These categories are consecutively used for binary sentiment classification using Logistic Regression and cross validation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
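The label construction and balancing just described might look like the following sketch, under the stated scheme (ratings 1-3 negative, 6 positive, rating-5 reviews down-sampled); the data structure is hypothetical:

```python
# Sketch of the label construction and balancing described above:
# ratings 1-3 are negative, 6 is positive, and rating-5 reviews are
# randomly sampled to even out the two classes. 'reviews' is a
# hypothetical list of (text, rating) pairs.
import random

def build_balanced_subset(reviews, seed=42):
    negative = [(t, 0) for t, r in reviews if r in (1, 2, 3)]
    positive = [(t, 1) for t, r in reviews if r == 6]
    fives = [t for t, r in reviews if r == 5]
    random.Random(seed).shuffle(fives)
    # Add just enough rating-5 reviews to match the negative class size.
    needed = max(0, len(negative) - len(positive))
    positive += [(t, 1) for t in fives[:needed]]
    return negative + positive
```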
|
{ |
|
"text": "In order to obtain a richer picture of the important features for classification, and how these differ between genders, we investigate the results of two different strategies for training using our genderannotated data:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Authors: We combine the train and dev splits within each of R F and R M for cross validation. Also, as previously mentioned, we balance the data within the splits such that the positive and negative classes are equally distributed. We thereafter analyse the results of four testing strategies:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "(1) train on R F train+dev, test on R F test, (2) train on R F train+dev, test on R M test, (3) train on R M train+dev, test on R M test, and (4) train on R M train+dev, test on R F test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Critics: We follow the same steps as above, but run different models based on the gender of the critic. We add an additional dimension to the previous analysis by comparing the author and critic aspects. More concretely, we analyse results of four training combinations: (1) R F F : female critics, female authors, (2) R M F : male critics, female authors, (3) R F M : female critics, male authors, and (4) R M M : male critics, male authors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For each of these strategies, we have manually analysed the 200 most informative words, and looked at the overlap between them. We provide additional details in Section 5.1 and Section 5.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As previously mentioned, we separate the reviews about female and male authors and create two subsets R F and R M . Then, we balance the number of positive and negative reviews, such that all reviews with ratings 1, 2, 3, and 6 are selected, and we select a random sample of the reviews with rating 5 to make the distribution of positive and negative labels balanced. Thereafter, for each of these subsets, we train three separate Logistic Regression models with cross validation for binary sentiment classification using as features the word counts of adjectives, verbs, and nouns of each review. Balancing the distribution of positive and negative labels in each of the subsets R F and R M considerably decreases the size of the data, which is already small to start with. We therefore run a 10-fold cross validation approach on the combined train and dev splits as identified in NoReC (for each of the subsets), and use the test split for final evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
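A sketch of one of the three per-PoS models described above, restricted to adjectives: content words are filtered on their PoS tags and fed to a Logistic Regression evaluated with cross-validation. The tagged toy reviews are illustrative only; the exact pipeline settings are assumptions.

```python
# Sketch of one per-PoS sentiment model: keep a single content-word class
# (here adjectives, via PoS tags) and train a Logistic Regression with
# cross-validation. The tagged toy reviews below are illustrative only.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

def content_words(tagged_review, keep=("ADJ",)):
    """Join the tokens whose PoS tag is in `keep` into one string."""
    return " ".join(tok for tok, pos in tagged_review if pos in keep)

tagged = [
    [("rørende", "ADJ"), ("vakker", "ADJ")],     # positive review
    [("sterk", "ADJ"), ("presis", "ADJ")],       # positive review
    [("kaotisk", "ADJ"), ("mislykket", "ADJ")],  # negative review
    [("svak", "ADJ"), ("kjedelig", "ADJ")],      # negative review
]
texts = [content_words(r) for r in tagged]
y = [1, 1, 0, 0]  # 1 = positive (ratings 5-6), 0 = negative (ratings 1-3)

model = make_pipeline(CountVectorizer(), LogisticRegression(max_iter=1000))
# With the real balanced train+dev data, cv=10 as described above;
# cv=2 here only because the toy set has four reviews.
scores = cross_val_score(model, texts, y, cv=2)
```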
|
{ |
|
"text": "For each of the subsets R F and R M we first test on the test splits of the same subset (R F test and R M test respectively), and then on the test split of the opposite gender. Here, we do not focus on achieving the best accuracy, but rather on understanding what guided the model during classification. However, for the record, we did a simple grid search to identify the most suitable parameters (we focused on the parameters penalty, solver, and max-iter). We found that there are no obvious differences in the accuracy of models tested on data describing the same gender versus data describing the opposite gender. We therefore focus on the actual word usage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For each subset, we identify the 200 most informative words during training. We believe that these words give insights into the classification process and can help us identify the differences between important words for the identifications of positive and negative reviews about female and male authors. Moreover, we cluster these 200 most informative words of each subset using pre-trained word embeddings 4 to identify the different clusters of words, which adds a second level of analysis to our investigation. We used the Silhouette method (Rousseeuw, 1987) to determine the optimal number of clusters, which was 25 clusters. We manually analysed these 25 cluster of words and labeled them as shown in Figure 2 . Using the information from the clusters, we analysed which adjectives, nouns, and verbs were positive in the R F but negative in R M , and vice versa. We also looked at which cluster of content words were positive and negative in both subsets R F and R M . These are presented in Figure 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 544, |
|
"end": 561, |
|
"text": "(Rousseeuw, 1987)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 706, |
|
"end": 714, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 997, |
|
"end": 1005, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
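The cluster-number selection can be sketched as follows: embed the informative words (in practice via the pre-trained vectors of footnote 4, e.g. loaded with gensim) and pick k by the silhouette criterion. Random vectors keep the sketch self-contained; as stated above, the optimum found on the real data was 25 clusters.

```python
# Sketch of the cluster-number selection with the silhouette criterion.
# Random vectors stand in for the embeddings of the top-200 informative
# words; on the real data the optimum reported above is 25 clusters.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

rng = np.random.default_rng(0)
vectors = rng.normal(size=(200, 100))  # one embedding per informative word

best_k, best_score = 2, -1.0
for k in range(2, 41):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(vectors)
    score = silhouette_score(vectors, labels)
    if score > best_score:
        best_k, best_score = k, score
# Cluster themes (e.g. 'uplifting', 'violence') were then assigned
# manually by inspecting the member words of each cluster.
```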
|
{ |
|
"text": "Most positive adjectives used to describe females, which also are negative when describing male authors are uplifting words (morsom 'funny', sjelden 'rare', utmerket 'excellent') and adjectives relating to quality characteristics (tydelig 'clear', rett 'right'). Also, adjectives describing emotions (r\u00f8rende 'touching', treffende 'aptly'), and socially critical and beliefs (filosofisk 'philosophical', historisk 'historical',), seems to be positive when describing female and negative for male. Most adjectives negatively used to describe the works of female authors and positively used for works by males are derogatory adjectives (kaotisk 'chaotic', mislykket 'unsuccessful'). These are in themselves negative words, but seem to be present in positive descriptions of work by male authors, which might either reflect that even if a book is unsuccessful, the male author might still be positively reviewed, or that unsuccessful events happening in a book might still be a positive aspect of the content of the book.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Some quality characteristics (uventet 'unexpected') seem to also be negative for female but positive for male works descriptions. Description of literary genre (selvbiografisk 'autobiographical', skj\u00f8nnlitteraer 'fiction'), pain inducing (farlig 'dangerous', tragisk 'tragic') and emotional (dyster 'gloomy', vittig 'witty') are also negatively used to describe the works of female authors, while positive for works by males. Another interesting observation, is that adjectives related to violence are used to positively describe male, while they are negative for females (e.g. d\u00f8d 'death'). Nouns related to literary genre (klassiker 'a classic'), life-cycles and relationships (kone 'wife', s\u00f8ster 'sister'), and violence (offer 'victim') tend to be positive when describing books written by female authors. In contrast, nouns related to consequences (reaksjon 'reaction'), pain inducing descriptions (smerte 'pain', skyldf\u00f8lelse 'guilt'), and commercial (penger 'money'), but also literary genre (essay 'essay', dikting 'poetry') are negative for females and positive for males.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "When it comes to verbs, the four clusters movements and travel, discourse and argumentation, consequences, and process seem to be used both positively and negatively when describing female and male authors. Verbs pertaining to mood and emotions (angre 'regret'), discourse and argumentation (avdekke 'uncover', snakke 'to talk'), reflect females positively and males negatively. Moreover, verbs associated with mind (evne 'ability', reflektere 'reflect') and violence (drepe 'kill', kidnappe 'kidnap') tend to reflect females' work as negative and males' work as positive. This might indicate that critics dislike crime fictions written by female authors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authors", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We take the same splits R F and R M as in Section 5.1 and split them further based on the gender of the critic. This results in the four splits introduced in Section 5: R F F , R F M , R M M , and R M F . We once again balance the distribution of positive and negative reviews, by selecting all reviews with ratings 1, 2, and 3 as negative, all reviews with rating 6 as positive, and a random sample of reviews with rating 5 to make the distribution of positive and negative balanced.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We follow the same steps introduced in Section 5.1, and train a simple Logistic Regression model with a 10-fold cross validation. We use the combination of train and dev splits for training, and keep the test split for final evaluation. We used different testing strategies to investigate whether the gender of the critic has a say on both the sentiment and the words used to describe the works of an author. These are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "(1) train on R F F , test on: R F F , R F M , R M M , R M F , (2) train on R F M , test on: R F M , R F F , R M M , R M F , (3) train on R M F , test on: R M F , R F F , R M M , R F M , (4) train on R M M , test on: R M M , R F F , R M F , R F M .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
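This grid of strategies amounts to training one model per critic-author subset and scoring it on every test split. A sketch, assuming the four subsets have been prepared as (train texts, train labels, test texts, test labels) tuples as in Section 5.1:

```python
# Sketch of the testing grid above: one model per training subset,
# evaluated on all four test splits. 'subsets' is assumed to map subset
# names (e.g. "R_FF") to (train_texts, train_y, test_texts, test_y)
# tuples built as in Section 5.1.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.pipeline import make_pipeline

def evaluate_grid(subsets):
    results = {}
    for train_name, (X_tr, y_tr, _, _) in subsets.items():
        model = make_pipeline(CountVectorizer(),
                              LogisticRegression(max_iter=1000))
        model.fit(X_tr, y_tr)
        for test_name, (_, _, X_te, y_te) in subsets.items():
            pred = model.predict(X_te)
            results[(train_name, test_name)] = f1_score(
                y_te, pred, average="macro")
    return results
```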
|
{ |
|
"text": "We analysed the accuracy, macro F1, and class-level F1 of each of our testing strategies. However, as in the case of authors, we could not identify any considerable differences between the values. There were small nuances in the values, but making sense out of them was not trivial. We therefore rather focus on the differences in language use, and how this is reflected in the most informative words during training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "After training, we identify the 200 most informative words for each of the subsets R F F , R F M , R M M , and R M F . We use the same pre-trained word embeddings as in Section 5.1, and cluster the most informative adjectives, nouns, and verbs. We identified 30 clusters. Each of the clusters represent the theme or topic of the set of words it comprises. The themes were manually attributed after careful analysis of the clusters. These clusters are shown in Figure 3 . To analyse the differences in each of the subsets R F F , R F M , R M M , and R M F , we focus on what is positive (negative) for each subset, but negative (positive) in the other subsets. This allows us to see the distinctive word differences, and which clusters seems to dominate these differences. As can be seen in Figure 3 , the overall distribution of clusters seems to have small variations, but in what follows we show examples of the actual words that were used, and how these differ.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 468, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 798, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Most words used negatively by female critics to describe female authors in short R F F , and which are positive in R F M , R M M , and R M F are related to assessment; where words like flink 'clever' and bra 'good' which in themselves are positive words are negatively used in R F F . Words like dramatikk 'dramatic', absurd 'absurd', trist 'sad', and h\u00e5pl\u00f8s 'hopeless' are representative of the cluster sad and strange, which female critics negatively employ to describe the works of female authors. Other interesting negatively representative words of the subset R F F are the words of the life-cycle and relationships cluster. The words barn, jente, gutt 'children, girl, boy', dame, mann 'woman, man', far 'dad', forelske 'fall in love', gift and gifte 'married' and 'get married' respectively, as well as f\u00f8de 'give birth' are positively used in the other subsets, but when female critics review female works, these seem to be negatively perceived. Moreover, some words with generally more positive connotations are representative of negativity in R F F , as e.g. stil 'style' and presis 'precise' (cluster quality), fascinere 'fascinate', fin 'nice', solid 'solid' (cluster uplifting), and elske 'love', and glad 'happy' (cluster feelings). Conversely, negative words that are also negatively used in R F F are related to violence as d\u00f8d 'dead', drepe 'kill', mord 'murder', and morder 'murderer'; and words related to contempt and cynical as selvopptatt 'selfish' and ulykkelig 'unhappy'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "On the other hand, words that are positively used by R F F while negatively used in the remaining subsets are mainly related to the clusters movement and change, and process and results. These exhibit some differences in how simple words can be mostly used to write about a given gender, and not another. The positive words from the life-cycle and relationships cluster are bror, s\u00f8ster 'brother, sister', venn, vennine 'friend(male and female)', and forelskelse 'infatuation'. While this cluster is also negatively used in (R F F ), the words are different. The words negatively used seem to be about advanced relationships, either with family members or love relationships (where getting married and having children seem to be negative), while in the positive R F F these words seems to be more about friendships, brothers and sisters, and early or short-term love interests. The same applies to the sad and strange clusters, which in positive (R F F ) comprises dramatisk 'dramatic', mystisk 'mysterious', and dyster 'gloomy'. Some of the positive qualities in R F F that are negative in the remaining subsets are humoristisk 'humorous', klasisk 'classical', poetisk 'poetic', and sentimental 'sentimental'. On the contrary, Some negative words from the cluster violence are also used positively blodig 'bloody', and d\u00f8 'dead'. Another interesting set of words that are positive in R F F but not elsewhere, are the words familieliv 'family life' and kjaerlighet 'love', which reflect the content of the cluster difficulties in romantic relationships.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In subset R F M , there is an interesting difference in positively and negatively words used from the cluster contempt and cynical. For example, words like desperat, hevn, l\u00f8gn 'desperate, vengeance, lie' are negatively used, while the more feeling oriented words are positive: ensom, hjertekjaerende, ulykkelig 'lonely, heartbreaking, unhappy'. When it comes to the feeling cluster, female critics use the words ambisjon, dr\u00f8mme 'ambition, dream' to negatively describe the work of male authors, while they use elske, glede 'love, joy' to positively describe works. As in the previous subset, words from life-cycle and relationships referring to marriage and wives are negatively used, while words referring to love and giving birth are positive. Coming from a female critic, this might be an indication for not adhering to the \"traditional\" views of relationships. This however, goes in contrast with the words from the political conflicts cluster, where words related to power and Christianity are positively used, while words related to rebellion and religious people are negative.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Some of the same observations can be found in R M F . When it comes to difficulties in romantic relationships, discussing family life, sex, and physical relationships is seen as negative by male critics when discussing the work of females, while using words about love and gender are positive. However, in contrast to the two previous subsets, discussing marriage and fatherhood is positive, while talking about love and giving birth is negative. This difference is particularly interesting, because we can see the effect of having a male or female critic. Another compelling difference, is that male critics perceive female authors who write crime fictions to be negative, while works of female authors are positively described if they are from other genres (e.g. biographies and autobiographical books, novels and novel collections).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "When male critics assess the work of male authors, they positively view works that are literary and poetic, but negatively describe biographies and prose. When it comes to difficulties in relationships, erotic works are positively seen, while those triggering anxieties are negative. Moreover, mentions of love, marriage, and children are actually perceived both positively and negatively, which is in contrast to the previous subsets where there was a clear difference in the polarity of early romance and stable relationships. Concerning political conflicts, works about Islam, Christianity, rebellions, and politics are negative, while those covering wars and power are positive. Another fascinating difference in this subset compared to the others, is that male critics who assess the work of male authors seems to be negative to romantic and sentimental books (romantisk, sentimental), while classics, witty and entertaining books, or books about music are deemed positive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "6 Non-professional reviews -the Bokelskere corpus", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Our gender-annotated literature subset of the NoReC corpus reflects how works by female and male authors are positively and negatively described. However, since these reviews are written by professionals, the language can be expected to be of a more formal and possibly less affective style. In order to investigate to what extent this proves to be correct, we carried out the same analysis done on NoReC on a corpus of non-professional reviews.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We use a corpus comprising user-generated book reviews from bokelskere.no compiled by the National Library of Norway (Spr\u00e5kbanken) 5 . We will refer to this corpus as the Bokelskere (book lovers) corpus in what follows. The Bokelskere corpus contains the raw texts from discussions and book reviews written by users of the bokelskere.no web community. The ratings follow the same scheme as in NoReC, with numerical scores ranging from 1 to 6. The reviews are structured as both reviews and comments on reviews. The corpus is in JSON format and contains a total of 219,000 review comments. For each of these, the corpus provides (amongst others) information about the book being reviewed (title and author), and the rating.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We annotated the Bokelskere corpus with PoS tags using the same version of UDPipe that was used to annotate NoReC (Velldal et al., 2018) . Moreover, neither the gender of the users (i.e. critics), nor the gender of the book authors are provided in the Bokelskere corpus. We therefore used our annotations from the NoReC corpus to automatically annotate Bokelskere. We only annotate the authors from our gender-annotated corpus for whom we know the gender. We were able to identify the gender of 9,833 female authors, and 15,544 male authors. However, 1,691 and 2,815 reviews of female and male authors respectively did not contain ratings and were therefore disregarded. Figure 4 gives an overview of the rating distributions of the remaining 8,142 female and 12,729 male reviews. The trend is similar to the distribution of ratings in the gender-annotated NoReC. Female authors tend to get more ratings on the lower range than male authors, while it is the opposite on the higher range. For ratings 1, 2, 3, and 4 female authors are given respectively 2.2%, 4.5%, 10.4% , and 22.6% of the total reviews, and male authors are given 1.5%, 4%, 8.9% , and 22% respectively. On the contrary, for ratings 5 and 6 female authors are respectively given 37.6%, and 22.5% of total ratings, while male are given 38.7%, and 24.7%. Since the gender of the users reviewing books is not available for the Bokelskere corpus, we focus our analysis on the gender of the reviewed authors and follow the same methodology described in Section 5.1. The Bookelskere corpus do not have predefined train, dev, and test splits. We therefore follow the same strategy as for the splits in NoReC by first sorting the reviews by date and then reserving the first 80% for training, the following 10% for dev split, and the remaining 10% for testing. However, during this work, we combine the train and dev splits, and train a Logistic regression with 10-fold cross validation, and do a final evaluation on the test split. We also balance the distribution of positive and negative reviews for each of the genders.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 136, |
|
"text": "(Velldal et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 671, |
|
"end": 679, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
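The chronological 80/10/10 splitting described above can be sketched as follows, assuming each review carries a sortable date field (the field name is hypothetical):

```python
# Sketch of the chronological 80/10/10 split used for Bokelskere,
# assuming each review is a dict with a sortable 'date' field
# (the field name is hypothetical).
def chronological_splits(reviews):
    ordered = sorted(reviews, key=lambda r: r["date"])
    n = len(ordered)
    train = ordered[: int(0.8 * n)]
    dev = ordered[int(0.8 * n): int(0.9 * n)]
    test = ordered[int(0.9 * n):]
    return train, dev, test
```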
|
{ |
|
"text": "Clustering the 200 most informative words for binary sentiment classification on Bokelskere, enabled us to identify 20 clusters. These are shown in Figure 5 . The distribution of overlap of these clusters based on how often they are used to positively or negatively describe books written by female or male authors also offers an interesting overview (see Figure 5) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 156, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 365, |
|
"text": "Figure 5)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "When it comes to adjectives, words that are positively used for female authors but negatively used to describe the works of male authors are mostly related to quality and nationality (mild 'mild', solid 'solid', engelsk 'English', fransk 'French'). But also words related to the expression of emotions as passion (inderlig 'sincerely'), praise (begeistre 'exciting'), and general descriptions (komisk 'comical'). Adjectives that are negatively used to describe the works of females, but positively used for male, tend also to relate to quality (\u00e5penbar 'obvious', personlig 'personal', realistisk 'realistic'), but also development (paralell 'parallel', tilgjengelig 'available'), praise (imponere 'impress', positiv 'positive'), derogatory (h\u00e5pl\u00f8s 'hopeless', vond 'bad') and uplifting words (g\u00f8y 'fun', inspirere 'inspire').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The nouns venn 'friend' (related to life-cycle and relationships) is used to positively describe female works and negatively describe male works, while the words datter 'daughter' and s\u00f8ster 'sister' are negative in female description but positive for male descriptions. Nouns related to crimes seems also to be positive for male, while negative for females (e.g. gjerningsmann 'perpetrator'). Most verbs used to positively describe females' while negatively describe males' works are development (sammenligne 'compare', presentere 'present'), and evaluation (forestille 'imagine', oppfatte 'perceive'). Conversely, verbs used negatively when describing works of females but positively for males seem to be related to completion (ende 'end', gjennomf\u00f8re 'conduct'), development (lage 'make'), evaluation (forst\u00e5 'understand'), life-cycle and relationships (f\u00f8de 'give birth', gifte 'marry'), and passion (forelske 'fall in love').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Critics", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We present a gender-annotated dataset of professional book reviews, where both the gender of critics and the book authors are annotated. We also present a corpus of user reviews annotated for the gender of the book authors. We make all annotations and reviews publicly available. We have shown that there are differences in how female and male book authors are positively or negatively described, and that the gender of the critics influences the differences. For example, male critics deem female crime novels and male romantic and sentimental books as negative. This shows that book reviews contain the social hierarchies that tend to focus on emotional traits to describe females as in Menegatti and Rubini (2017) . There are several ways in which the preliminary analysis of the current work can be improved and extended. First, the annotations of the book authors are based on which book is being reviewed, and not if the author is being mentioned in the review. This can lead to issues during classification, since it might be possible that the review in itself contains references to the characters of the book, which might or might not be of the same gender as the author. Therefore, the word usage might not actually reflect the book author, but rather the fictional characters of the book. Secondly, we were able to identify differences in how female and male critics describe the works of female and male authors, but we did not quantify to which degree this is true. The distribution of ratings gives an indication of this, but more extensive analysis is necessary. In future works, we aim to explore how to quantify the amount of bias, but also identify if a review is discussing the quality of the book (as in the work of the author), or if it only focuses on the characters and the storyline.", |
|
"cite_spans": [ |
|
{ |
|
"start": 689, |
|
"end": 716, |
|
"text": "Menegatti and Rubini (2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and limitations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "http://clarino.uib.no/iness/page?page-id=Resources 2 Some sources in NoReC have books reviewed by children and teenagers who have the appropriate age levels for the books. 3 https://github.com/ltgoslo/norec_gender", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Model 2 from http://vectors.nlpl.eu/repository/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The corpus can be found here: https://www.nb.no/sprakbanken/en/resource-catalogue/ oai-nb-no-sbr-53/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work has been carried out as part of the SANT project (Sentiment Analysis for Norwegian Text), funded by the Research Council of Norway (grant number 270908).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Evaluating the underlying gender bias in contextualized word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Basta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noe", |
|
"middle": [], |
|
"last": "Casas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 1st Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christine Basta, Marta R Costa-Juss\u00e0, and Noe Casas. 2019. Evaluating the underlying gender bias in contextual- ized word embeddings. In Proceedings of the 1st Workshop on Gender Bias in Natural Language Processing, pages 33-39, Florence, Italy, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Good secretaries, bad truck drivers? occupational gender stereotypes in sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Jayadev", |
|
"middle": [], |
|
"last": "Bhaskaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isha", |
|
"middle": [], |
|
"last": "Bhallamudi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jayadev Bhaskaran and Isha Bhallamudi. 2019. Good secretaries, bad truck drivers? occupational gender stereo- types in sentiment analysis. In Proceedings of the First Workshop on Gender Bias in Natural Language Pro- cessing, pages 62-68, Florence, Italy, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Language (technology) is power: A critical survey of \"bias\" in NLP", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Su Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5454--5476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Solon Barocas, Hal Daum\u00e9 III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computa- tional Linguistics, pages 5454-5476, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Man is to computer programmer as woman is to homemaker? debiasing word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4349--4357", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Y Zou, Venkatesh Saligrama, and Adam T Kalai. 2016. Man is to com- puter programmer as woman is to homemaker? debiasing word embeddings. In Advances in neural information processing systems, pages 4349-4357.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Discriminating gender on twitter", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Burger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guido", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zarrella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1301--1309", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D Burger, John Henderson, George Kim, and Guido Zarrella. 2011. Discriminating gender on twitter. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 1301-1309.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Toward gender-inclusive coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trista", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4568--4595", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Trista Cao and Hal Daum\u00e9 III. 2020. Toward gender-inclusive coreference resolution. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4568-4595, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "GeBioToolkit: Automatic extraction of gender-balanced multilingual corpus of Wikipedia biographies", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pau", |
|
"middle": [], |
|
"last": "Li Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristina", |
|
"middle": [], |
|
"last": "Espa\u00f1a-Bonet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4081--4088", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta R. Costa-juss\u00e0, Pau Li Lin, and Cristina Espa\u00f1a-Bonet. 2020. GeBioToolkit: Automatic extraction of gender-balanced multilingual corpus of Wikipedia biographies. In Proceedings of The 12th Language Re- sources and Evaluation Conference, pages 4081-4088, Marseille, France, May. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidi- rectional transformers for language understanding. In Proceedings of NAACL-HLT 2019, page 4171-4186, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Equalizing gender bias in neural machine translation with word embeddings techniques", |
|
"authors": [ |
|
{ |
|
"first": "Joel", |
|
"middle": [ |
|
"Escud\u00e9" |
|
], |
|
"last": "Font", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "147--154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joel Escud\u00e9 Font and Marta R. Costa-juss\u00e0. 2019. Equalizing gender bias in neural machine translation with word embeddings techniques. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 147-154, Florence, Italy, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Relating word embedding gender biases to gender gaps: A cross-cultural analysis", |
|
"authors": [ |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonja", |
|
"middle": [], |
|
"last": "Schmer-Galunder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Rye", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Gender Bias in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "18--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott Friedman, Sonja Schmer-Galunder, Anthony Chen, and Jeffrey Rye. 2019. Relating word embedding gender biases to gender gaps: A cross-cultural analysis. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 18-24, Florence, Italy, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Word embeddings quantify 100 years of gender and ethnic stereotypes", |
|
"authors": [ |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Londa", |
|
"middle": [], |
|
"last": "Schiebinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "115", |
|
"issue": "16", |
|
"pages": "3635--3644", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikhil Garg, Londa Schiebinger, Dan Jurafsky, and James Zou. 2018. Word embeddings quantify 100 years of gender and ethnic stereotypes. Proceedings of the National Academy of Sciences, 115(16):E3635-E3644.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Zooming in on gender differences in social media", |
|
"authors": [ |
|
{ |
|
"first": "Aparna", |
|
"middle": [], |
|
"last": "Garimella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Workshop on Computational Modeling of People's Opinions, Personality, and Emotions in Social Media (PEOPLES)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aparna Garimella and Rada Mihalcea. 2016. Zooming in on gender differences in social media. In Proceedings of the Workshop on Computational Modeling of People's Opinions, Personality, and Emotions in Social Media (PEOPLES), pages 1-10, Osaka, Japan, December. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "609--614", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 609-614, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Unsupervised discovery of gendered language through latent-variable modeling", |
|
"authors": [ |
|
{ |
|
"first": "Alexander Miserlis", |
|
"middle": [], |
|
"last": "Hoyle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Wolf-Sonkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Augenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Miserlis Hoyle, Lawrence Wolf-Sonkin, Hanna Wallach, Isabelle Augenstein, and Ryan Cotterell. 2019. Unsupervised discovery of gendered language through latent-variable modeling. In Proceedings of the 57th", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1706--1716", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 1706-1716, Florence, Italy, July. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Gender-preserving debiasing for pre-trained word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Masahiro", |
|
"middle": [], |
|
"last": "Kaneko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danushka", |
|
"middle": [], |
|
"last": "Bollegala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1641--1650", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masahiro Kaneko and Danushka Bollegala. 2019. Gender-preserving debiasing for pre-trained word embeddings. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1641-1650, Florence, Italy, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Examining gender and race bias in two hundred sentiment analysis systems", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko and Saif Mohammad. 2018. Examining gender and race bias in two hundred sentiment analysis systems. In Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics, pages 43-53, New Orleans, Louisiana, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Of men, women, and computers: Data-driven gender modeling for improved user interfaces", |
|
"authors": [ |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ICWSM", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "26--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Liu and Rada Mihalcea. 2007. Of men, women, and computers: Data-driven gender modeling for improved user interfaces. ICWSM, 7:26-28.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Gender and letters of recommendation for academia: agentic and communal differences", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Juan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michelle", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Madera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Randi C", |
|
"middle": [], |
|
"last": "Hebl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Journal of Applied Psychology", |
|
"volume": "94", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juan M Madera, Michelle R Hebl, and Randi C Martin. 2009. Gender and letters of recommendation for academia: agentic and communal differences. Journal of Applied Psychology, 94(6):1591.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "It's all in the name: Mitigating gender bias with name-based counterfactual data substitution", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Rowan Hall Maudslay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Teufel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5267--5275", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rowan Hall Maudslay, Hila Gonen, Ryan Cotterell, and Simone Teufel. 2019. It's all in the name: Mitigating gender bias with name-based counterfactual data substitution. In Proceedings of the 2019 Conference on Empir- ical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 5267-5275, Hong Kong, China, November. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "On measuring social biases in sentence encoders", |
|
"authors": [ |
|
{ |
|
"first": "Chandler", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "622--628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chandler May, Alex Wang, Shikha Bordia, Samuel R. Bowman, and Rachel Rudinger. 2019. On measuring social biases in sentence encoders. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 622-628, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Gender bias and sexism in language", |
|
"authors": [ |
|
{ |
|
"first": "Michela", |
|
"middle": [], |
|
"last": "Menegatti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monica", |
|
"middle": [], |
|
"last": "Rubini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Oxford Research Encyclopedia of Communication", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michela Menegatti and Monica Rubini. 2017. Gender bias and sexism in language. In Oxford Research Encyclo- pedia of Communication.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Improving gender classification of blog authors", |
|
"authors": [ |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 conference on Empirical Methods in natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "207--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arjun Mukherjee and Bing Liu. 2010. Improving gender classification of blog authors. In Proceedings of the 2010 conference on Empirical Methods in natural Language Processing, pages 207-217.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Reducing gender bias in word-level language models with a gender-equalizing loss function", |
|
"authors": [ |
|
{ |
|
"first": "Yusu", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urwa", |
|
"middle": [], |
|
"last": "Muaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jae", |
|
"middle": [ |
|
"Won" |
|
], |
|
"last": "Hyun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "223--228", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusu Qian, Urwa Muaz, Ben Zhang, and Jae Won Hyun. 2019. Reducing gender bias in word-level language models with a gender-equalizing loss function. pages 223-228, July.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Overview of the 7th author profiling task at pan 2019: Bots and gender profiling in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Rangel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the CEUR Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francisco Rangel and Paolo Rosso. 2019. Overview of the 7th author profiling task at pan 2019: Bots and gender profiling in twitter. In Proceedings of the CEUR Workshop, Lugano, Switzerland, pages 1-36.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Silhouettes: a graphical aid to the interpretation and validation of cluster analysis", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rousseeuw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1987, |
|
"venue": "Journal of computational and applied mathematics", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "53--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter J Rousseeuw. 1987. Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. Journal of computational and applied mathematics, 20:53-65.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Gender bias in coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "8--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Gender bias in pretrained Swedish embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Magnus", |
|
"middle": [], |
|
"last": "Sahlgren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fredrik", |
|
"middle": [], |
|
"last": "Olsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 22nd Nordic Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Magnus Sahlgren and Fredrik Olsson. 2019. Gender bias in pretrained Swedish embeddings. In Proceedings of the 22nd Nordic Conference on Computational Linguistics, pages 35-43, Turku, Finland, September-October. Link\u00f6ping University Electronic Press.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Gender-distinguishing features in film dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Schofield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Mehr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Fifth Workshop on Computational Linguistics for Literature", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandra Schofield and Leo Mehr. 2016. Gender-distinguishing features in film dialogue. In Proceedings of the Fifth Workshop on Computational Linguistics for Literature, pages 32-39, San Diego, California, USA, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "NoReC: The Norwegian Review Corpus", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Velldal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lilja", |
|
"middle": [], |
|
"last": "\u00d8vrelid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cathrine", |
|
"middle": [], |
|
"last": "Stadsnes Eivind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Bergem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samia", |
|
"middle": [], |
|
"last": "Touileb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fredrik", |
|
"middle": [], |
|
"last": "J\u00f8rgensen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th edition of the Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4186--4191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Velldal, Lilja \u00d8vrelid, Cathrine Stadsnes Eivind Alexander Bergem, Samia Touileb, and Fredrik J\u00f8rgensen. 2018. NoReC: The Norwegian Review Corpus. In Proceedings of the 11th edition of the Language Resources and Evaluation Conference, pages 4186-4191, Miyazaki, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Rtgender: A corpus for studying differential responses to gender", |
|
"authors": [ |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Voigt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinodkumar", |
|
"middle": [], |
|
"last": "Prabhakaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rob Voigt, David Jurgens, Vinodkumar Prabhakaran, Dan Jurafsky, and Yulia Tsvetkov. 2018. Rtgender: A corpus for studying differential responses to gender. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Gender bias in coreference resolution: Evaluation and debiasing methods", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "15--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Ordonez, and Kai-Wei Chang. 2018a. Gender bias in coreference resolution: Evaluation and debiasing methods. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 15-20, New Orleans, Louisiana, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning gender-neutral word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yichao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4847--4853", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Yichao Zhou, Zeyu Li, Wei Wang, and Kai-Wei Chang. 2018b. Learning gender-neutral word embed- dings. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4847-4853, Brussels, Belgium, October-November. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Gender bias in multilingual embeddings and cross-lingual transfer", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subhabrata", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Hosseini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"Hassan" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Awadallah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2896--2907", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Subhabrata Mukherjee, saghar Hosseini, Kai-Wei Chang, and Ahmed Hassan Awadallah. 2020. Gen- der bias in multilingual embeddings and cross-lingual transfer. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2896-2907, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Counterfactual data augmentation for mitigating gender stereotypes in languages with rich morphology", |
|
"authors": [ |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "Zmigrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabrina", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mielke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1651--1661", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ran Zmigrod, Sabrina J. Mielke, Hanna Wallach, and Ryan Cotterell. 2019. Counterfactual data augmentation for mitigating gender stereotypes in languages with rich morphology. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 1651-1661, Florence, Italy, July. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "" |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Distribution of clusters of most informative words for sentiment classification in R F F , R F M , R M M , and R M F in the gender-annotated NoReC Corpus." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Distribution of ratings given to Female (F) and male (M) authors in the Bokelskere corpus. The y axis represents normalized percentages of each rating." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Distribution of clusters of most informative words for sentiment classification in F subset and M subset in Bokelskere Corpus." |
|
}, |
|
"TABREF1": { |
|
"text": "Annotation process summary.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"text": "Total counts of reviews by gender.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Authors</td><td>Critics</td></tr><tr><td colspan=\"2\">pos neg pos neg</td></tr><tr><td colspan=\"2\">Acc 0.83 0.95 0.86 0.69</td></tr><tr><td colspan=\"2\">MC 0.57 0.55 0.60 0.44</td></tr><tr><td colspan=\"2\">F1 M 0.86 0.95 0.89 0.70</td></tr><tr><td colspan=\"2\">F1 F 0.79 0.94 0.82 0.68</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "Accuracy of gender classification of authors and critics. Here, pos and neg represent to which polarity the test set belongs. F1 M and F1 F represent class-level F1 scores. MC represents majority class values.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"text": "Distribution of clusters of most informative words for sentiment classification in R F and R M in NoReC.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Normalized # important features from each cluster</td><td>derogatory uplifting violence life course, relationships mastery, expectation quality characteristics surroundings mind managment mood, emotions literature genre misc timing, outcome movement, travel publishing pain inducing discourse, argumentation composition consequences socially critical, beliefs nynorsk nationality, ethnicity, ideology emotionality process commercial</td><td>Normalized # important features from each cluster</td><td>20 40 60 80 100</td><td/><td/><td/><td/><td>nynorsk nature assessment political conflicts surroundings reports and notes discourse quality contempt and cynical feelings mastery and challenges discussion and dialogue uplifting depiction sad and strange literature genre movement and change success and accomplishments body, food, belongings difficulties romantic relationships author and work charachteristics nationality life-cycle and relationships guidelines process and results violence quantity or amount thoughts importance and outcomes commercial and business</td></tr><tr><td colspan=\"4\">Figure 2: FFneg_FM+MF+MMpos 0</td><td>FFpos_FM+MF+MMneg</td><td>FMneg_FF+MF+MMpos</td><td>FMpos_FF+MF+MMneg</td><td>MFneg_FF+FM+MMpos</td><td>MFpos_FF+FM+MMneg</td><td>MMneg_FF+FM+MFpos</td><td>MMpos_FF+FM+MFneg</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |