|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:02:18.818765Z" |
|
}, |
|
"title": "Indigenous Language Revitalization and the Dilemma of Gender Bias", |
|
"authors": [ |
|
{ |
|
"first": "Oussama", |
|
"middle": [], |
|
"last": "Hansal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universit\u00e9 du Qu\u00e9bec \u00e0 Montr\u00e9al", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Tan" |
|
], |
|
"last": "Le", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universit\u00e9 du Qu\u00e9bec \u00e0", |
|
"location": { |
|
"settlement": "Montr\u00e9al" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Fatiha", |
|
"middle": [], |
|
"last": "Sadat", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universit\u00e9 du Qu\u00e9bec \u00e0", |
|
"location": { |
|
"settlement": "Montr\u00e9al" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Natural Language Processing (NLP), through its several applications, has been considered as one of the most valuable field in interdisciplinary researches, as well as in computer science. However, it is not without its flaws. One of the most common flaws is bias. This paper examines the main linguistic challenges of Inuktitut, an indigenous language of Canada, and focuses on gender bias identification and mitigation. We explore the unique characteristics of this language to help us understand the right techniques that can be used to identify and mitigate implicit biases. We use some methods to quantify the gender bias existing in Inuktitut word embeddings; then we proceed to mitigate the bias and evaluate the performance of the debiased embeddings. Next, we explain how approaches for detecting and reducing bias in English embeddings may be transferred to Inuktitut embeddings by properly taking into account the language's particular characteristics. We compare the effect of the debiasing techniques on Inuktitut and English. Finally, we highlight some future research directions which will further help to push the boundaries.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Natural Language Processing (NLP), through its several applications, has been considered as one of the most valuable field in interdisciplinary researches, as well as in computer science. However, it is not without its flaws. One of the most common flaws is bias. This paper examines the main linguistic challenges of Inuktitut, an indigenous language of Canada, and focuses on gender bias identification and mitigation. We explore the unique characteristics of this language to help us understand the right techniques that can be used to identify and mitigate implicit biases. We use some methods to quantify the gender bias existing in Inuktitut word embeddings; then we proceed to mitigate the bias and evaluate the performance of the debiased embeddings. Next, we explain how approaches for detecting and reducing bias in English embeddings may be transferred to Inuktitut embeddings by properly taking into account the language's particular characteristics. We compare the effect of the debiasing techniques on Inuktitut and English. Finally, we highlight some future research directions which will further help to push the boundaries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Despite the complexity of low resource and endangered languages, the study of these languages has pulled many researchers in recent years, while this can be an encouraging factor for the development of language technologies, the complex morphology of some languages and the lack of resources have been considered as barriers. Moreover, as many NLP tasks are trained on human language data, it is expected for these applications to exhibit biases in different forms. Hovy and Prabhumoy (2021) described five sources where bias can occur in NLP systems: (1) the data, (2) the annotation process, (3) the input representations, (4) the models, and finally (5) the research design.", |
|
"cite_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 491, |
|
"text": "Hovy and Prabhumoy (2021)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Gender bias can be defined as prejudice toward one gender over the other. Though usually tacit, bias range from the use of gender defaults to associating between occupation and gender. As language technologies become widespread and deployed on a large scale, their social impact raises concerns both internally and externally (Hovy and Spruit, 2016; Dastin, 2018) . To capture the situation, Sun et al. (2019) reviewed NLP studies on this topic. However, their investigation is based on monolingual applications where the underlying assumptions and solutions may not directly apply to languages other than English. Thus, depending on the language involved and the factors taken into account, gender stereotypes have been conceptualized differently from study to study. To date, gender stereotypes have been addressed through a narrow problem-solving approach. While technical countermeasures are necessary, the failure to take a broader look at and engage with relevant literature outside of NLP could be detrimental to the growth of the field.", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 349, |
|
"text": "(Hovy and Spruit, 2016;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 363, |
|
"text": "Dastin, 2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 409, |
|
"text": "Sun et al. (2019)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For example, when translating from English to French this following sentence, by Google Translate 1 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(en) The engineer has asked the nurse to help her get up from the bed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(fr) L'ing\u00e9nieur a demand\u00e9 \u00e0 l'infirmi\u00e8re de l'aider \u00e0 se lever du lit.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We can see that it identified the engineer as a male and the nurse as a female, even though we used \"her\" to indicate that we are referring to a female. Such inadequacies not only jeopardize the development of endangered languages applications, but also perpetuate and amplify existent biases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Understanding how human biases are incorporated into word embeddings can help us understand bias in NLP models, given that word embeddings are commonly used in NLP. While some significant work has been done toward minimizing the bias in the embeddings, it has been proved that some methods are insufficient and that the bias can remain hidden within the embeddings. The words frequency is not taken into account, regardless of the gender distances, therefore biased terms can remain clustered together. Furthermore, when applied to contextualized word embeddings, these bias approaches must be changed because the embedding representation of each word varies based on the context. This research intends to shed light on this issue by evaluating recent efforts to identify and mitigate bias within the indigenous languages revitalization and preservation context. We focus on Inuktitut, one of the main Inuit language of Eastern Canada and the official language of the government of Nunavut.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Thus, this paper is structured as follows: Section 2 presents the state-of-the-art. Section 3 presents the bias statement. Section 4 discusses the linguistic challenges of indigenous languages, with a focus on Inuktitut. Sections 5 highlights gender bias detection and mitigation. Section 7 presents the evaluations and the experimental results; while comparing with other existing approaches. Section 8 discusses the necessity of a human in the loop paradigm. Finally, Section 9 concludes this paper and presents potential future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Interest in understanding, assessing, and reducing gender bias continues to grow in the NLP field, with recent studies showing how gender disparities affect the language technologies. Sometimes, for example, when visual recognition tasks fail to recognize female doctors (Zhao et al., 2017; Rudinger et al., 2018) , image caption models do not detect women sitting next to the machine (Hendricks et al., 2018) ; and automatic speech recognition works best with male voices (Tatman, 2017) . Although previously unconcerned with these phenomena in research programs (Cislak et al., 2018) ; it is now widely recognized that NLP tools encode and reflect asymmetries controversial society for many seemingly neutral tasks, including machine translation (MT). Admittedly, this problem is not new.", |
|
"cite_spans": [ |
|
{ |
|
"start": 271, |
|
"end": 290, |
|
"text": "(Zhao et al., 2017;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 313, |
|
"text": "Rudinger et al., 2018)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 409, |
|
"text": "(Hendricks et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 487, |
|
"text": "(Tatman, 2017)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 585, |
|
"text": "(Cislak et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A few years ago, Schiebinger (2014) criticized the phenomenon of \"missing men\" in machine translation after conducting one of his interviews through a commercial translation system. Although there are some feminine mentions in the text, the female pronoun \"she\" is mentioned several times by the masculine pronoun. Users of online machine translation tools have also expressed concern about gender, having noticed how commercial systems manipulate society's expectations of gender, for example by projecting the translation of engineer into masculinity and that of medical science into femininity. Bolukbasi et al. (2016) proved the existence of gender bias in English word embeddings, and proposed a method called Hard Debias to mitigate the gender bias. Liang et al. (2020) proposed a modified method that relies heavily on the sentences used to reduce biases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 598, |
|
"end": 621, |
|
"text": "Bolukbasi et al. (2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We hypothesize that because English uses the common pronouns he and she extensively, which are not used in Inuktitut, as much as in English, for different reasons 2 ; the mitigation step encompasses a smaller gender subspace in comparison to English, and thus the bias is reduced.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another method is the Iterative Null space Projection (INLP), which is a post-hoc method that can work on pre-trained representations (Ravfogel et al., 2020) . The INLP's concept aims to identify task direction by training linear classifiers and removing direction from representation. INLP is effective in reducing gender bias. It was tested and showed great results in both word embeddings and contextualized word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "(Ravfogel et al., 2020)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Most of the solutions were mainly proposed to reduce gender bias in English, and may not work as well when it comes to morphologically complex or polysynthetic languages. Nevertheless, there have been recent studies that explored the gender bias problem in languages other than English. Zhao et al. (2020) studied gender bias which is exhibited by multilingual embeddings in four languages (English, German, French, and Spanish) and demonstrated that such biases can impact cross-lingual transfer learning tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Lewis and Lupyan (2020) examined whether gender stereotypes are reflected in the large-scale distributional structure of natural language semantics and measured gender associations embedded in the statistics of 25 languages and related them to data on an international dataset of psychological gender associations. Choubey et al. 2021proposed gender-filtered self-training to improve gender translation accuracy on unambiguously gendered inputs. Their approach used a source monolingual corpus and an initial model to generate gender-specific pseudoparallel corpora, which were then filtered and added to the training data. They evaluated their method from English to five languages, which showed an improvement in gender accuracy without damaging gender equality. Ntoutsi et al. (2020) presented a wide multidisciplinary overview of bias in AI systems, with an emphasis on technological difficulties and solutions, as well as new research directions toward approaches that are well-grounded in a legal framework.", |
|
"cite_spans": [ |
|
{ |
|
"start": 765, |
|
"end": 786, |
|
"text": "Ntoutsi et al. (2020)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The bias study in machine learning is not only restricted to the computer science field. Interdisciplinary research can help address this challenge across disciplines such as psychology, sociology, linguistics, cognitive science, and more (Datta, 2018) . Hassan (2016) conducted a wide study on the influence that English has had on other language communities such as Inuit community. It can be seen in the way that it has affected gender relations specifically, by disempowering women in indigenous communities, the same as described in (Gudmestad et al., 2021) . Men were assigned the role of hunting, and as such, became the \"breadwinner\" of the family. Women, on the other hand, were relegated to take care of the house and children, leaving them with no economic power and a perceived subordinate role within the family (Leigh, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 252, |
|
"text": "(Datta, 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 562, |
|
"text": "(Gudmestad et al., 2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 825, |
|
"end": 838, |
|
"text": "(Leigh, 2009)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "According to Williamson (2006) , the Inuits use a concept that encapsulates history, philosophy and observations of the world surrounding them. They call it \"Qaujimajatuqangit\" which is translated as \"traditional knowledge\". For Inuit people, \"Qaujimajatuqangit\" establishes gender equality in several fundamental ways. It respects the balance between the gender roles, the importance of family, and the fluidity of both gender and sexuality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 30, |
|
"text": "Williamson (2006)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Bias in NLP systems often goes without notice, it's often not even detected until after the systems are launched and used by consumers, which can have adverse effects on our society, such as when it shows false information to people which leads them to believe untrue things about society or them-selves; thereby changing their behavior for better or worse (Stanczak and Augenstein, 2021) . The harm of bias in NLP has been understated by some people and overstated by others, who dismiss its relevance or refuse to engage with it altogether. In this paper, we focus on the study of gender bias. If a system associates certain professions with a specific gender, this creates a representational harm. Representational harm is when an individual who falls into one of those categories is treated less fairly than someone outside of that category because of their belonging to it. For example, negative selection have been reported to occur more frequently in male dominated jobs than in other types of jobs (Davison and Burke, 2000) . Similar conclusions have been made in the areas of competency assessments and performance evaluations, women were rated less positively than men in line jobs (which tend to be male gender-typed), but not in staff jobs, according to a prominent financial services organization (Lyness and Heilman, 2006) . By looking at common examples of bias in the workplace, we can begin to understand how it can harm people in the office When such representations are being used in downstream NLP tasks. It can make the work environment feel less inclusive and less productive. Every single one of us has biases, but it's important to acknowledge when and how they impact our lives and the lives of others. According to recent research in NLP, word embeddings can incorporate social and implicit biases inherent in the training data (Swinger et al., 2019; Schlender and Spanakis, 2020; Caliskan, 2021) . Current NLP models have proven to be good at detecting prejudices (Ahmed et al., 2022) . However, unlike with prejudice, biases are not always obvious. While some biases are detectable via context, others might not be-which makes it difficult for automated systems to detect them. In fact, detecting and mitigating bias within automated systems prove to be more challenging than detecting it within human beings due to several important factors as dealing with imprecise sentiment analysis; as opposed to humans who can express nuanced sentiments when discussing bias. Our effort is predicated on the assumption that observed gender bias in systems are an indication of an insufficient interest into detecting and mitigating bias, we also believe that separating genders and professions in word embeddings would allow systems to detect and mitigate gender rather than promote it.", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 388, |
|
"text": "(Stanczak and Augenstein, 2021)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1031, |
|
"text": "(Davison and Burke, 2000)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1310, |
|
"end": 1336, |
|
"text": "(Lyness and Heilman, 2006)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1854, |
|
"end": 1876, |
|
"text": "(Swinger et al., 2019;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1877, |
|
"end": 1906, |
|
"text": "Schlender and Spanakis, 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1907, |
|
"end": 1922, |
|
"text": "Caliskan, 2021)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1991, |
|
"end": 2011, |
|
"text": "(Ahmed et al., 2022)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this section, we present the main linguistic challenges of Canada's indigenous languages, especially Inuktitut, an Inuit language of Eastern Canada and official language of the government of Nunavut. Thus, to better understand the challenges of NLP in Inuktitut, we explore the structure of Inuktitut words, the levels of grammatical variations, the dialectal variations in spelling, and gender animacy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Linguistic Challenges in Indigenous Languages", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Most of the indigenous languages, particularly in the Americas, belong to either the polysynthetic language group or the agglutinative language group. They have a complex, rich morphology that plays an important role in human learning versus machine learning (Gasser, 2011; Littell et al., 2018) . Much of the research on their morphological analysis has focused only on linguistic aspects. Comparing word composition in English, the word structure in Inuit languages is variable in its surface form. Words can be very short, composed of three formative features such as word base, lexical suffixes, and grammatical ending suffixes. Or they can be very long up to ten or even fifteen formative morphemes as features depending on the regional dialect (Lowe, 1985; Kudlak and Compton, 2018 ; Le and Sadat, 2020, 2022).", |
|
"cite_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 273, |
|
"text": "(Gasser, 2011;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 295, |
|
"text": "Littell et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 762, |
|
"text": "(Lowe, 1985;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 787, |
|
"text": "Kudlak and Compton, 2018", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morphological complexity", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The morphophonemics of Inuktitut are highly complex, in addition to the variety of morphological suffixes that Inuktitut roots can take on (Mithun, 2015) . In Inuktitut, each morpheme specifies the sound variations that can occur to its left and/or to itself. These modifications are phonologically conditioned by the individual morphemes themselves, rather than their contexts. This not only aggravates the data sparsity issue, but it also poses morphological analysis issues, which we shall address in the research topics of this project.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 153, |
|
"text": "(Mithun, 2015)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morphophonemics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The third aspect of Inuktitut which contributes to the challenge of processing it with a computer is the abundance of spelling variation seen in the electronically available texts. Inuktitut, like all languages, can be divided into a number of different dialects, such as Uummarmiutun, Siglitun, Inuinnaqtun, Natsilik, Kivallirmiutun, Aivilik, North Baffin, South Baffin, Arctic Quebec, and Laborador (Dorais, 1990) . The primary distinction between these dialects is phonological, which is reflected in spelling. As a result, spelling variance, either due to a lack of standardisation or due to numerous dialect changes, contributes significantly to the overall sparsity of the data in the corpora accessible for experimentation (Micher, 2018).", |
|
"cite_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 415, |
|
"text": "(Dorais, 1990)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dialectal variations", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Inuit languages are known to have some particular linguistics challenges. There is no gender marking in nouns, like you'll find in French and Spanish (male / female) nouns. Instead, Inuktitut distinguishes words along a dimension called animacy, because of the cultural understanding as to whether a noun is known to be alive or not. The singular and plural suffixes that are used in nouns, depend on whether is is animate or inanimate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gender animacy", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The animacy is described as a distinction between human and non-human, rational and irrational, socially active and socially passive 3 . For example, animate nouns are related to humans and animals most obviously, but other objects that are not considered alive, like stone, table, are considered as inanimate. Animate and inanimate gender is common in many Amerindian families such as Cree, Inuktitut, Quechuan, Aymara, Mapudungun, Iroquoian, and Siouan 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gender animacy", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Although existing machine learning models achieve great results on many tasks, they generally fail in avoiding biases. Recent studies illustrate how bias affect NLP technologies, which has created a growing interest in identifying, analysing and mitigating bias within the NLP community. The problem is not new, it is well-known that NLP systems contain and reflect algorithmic bias in them, this controversial imbalances has developed a large scale of concerns about its social impact. NLP systems and tools are used in everyday life, The time of academic naivety is finished, therefore we must acknowledge that our models have an impact on people's lives, but not necessarily in the way we intend (Ehni, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 699, |
|
"end": 711, |
|
"text": "(Ehni, 2008)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias detection and mitigation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To contextualize the plan within this larger research area, we will focus on indigenous languages that proves no exception to the existent problem of bias in NLP systems. Indigenous languages contain a wealth of secondary data about individuals, their identity and their demographic group, which are exploited to fulfil the objective of creating NLP systems. The focus on creating these systems has drifted us away from creating models as tools of understanding towards other tools that produce great results but are far more difficult to understand (Hovy and Prabhumoye, 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 550, |
|
"end": 577, |
|
"text": "(Hovy and Prabhumoye, 2021)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias detection and mitigation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Many questions may arise, such as: Is it possible that NLP models are biased by definition? What could be the source of this bias ? Can we figure out what it is? Is there anything we can do about it ?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias detection and mitigation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Bias is a complex concept with overlapping definitions (Campolo et al., 2017) . It has been considered as a fundamental human decision-making process since the beginning of time (Kahneman and Tversky, 1973) . When we apply a cognitive bias, we are assuming that reality will behave in accordance with prior cognitive convictions that may or may not be accurate, with which we can make a judgement (Garrido-Mu\u00f1oz et al., 2021) . According to the Sociology dictionary 5 , bias is a term used to describe an unjust prejudice in favour of or against a person, group, or thing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 77, |
|
"text": "(Campolo et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 206, |
|
"text": "(Kahneman and Tversky, 1973)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 425, |
|
"text": "(Garrido-Mu\u00f1oz et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definition of Bias", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Machine learning bias can happen in a variety of ways, ranging from racial and gender discrimination to age discrimination. It also exists in machine learning algorithms throughout their development, which is the root problem of machine learning bias. Therefore, human biases are adopted and scaled by machine learning systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definition of Bias", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Machine learning models incorporate bias in many shapes, including gender, racial and religious biases extending to unfair recruiting and age discrimination. But what are really the machine learning types of bias?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "According to (Shashkina, 2022) , the most common types of machine learning bias found in algorithms are listed below:", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 30, |
|
"text": "(Shashkina, 2022)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Reporting bias: It happens when the frequency of occurrences in the training dataset does not 5 Open Education Sociology Dictionary: https:// sociologydictionary.org/bias/ precisely reflect reality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 97, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Selection bias: This sort of bias happens when training data is either unrepresentative or not randomly selected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Group attribution bias: It happens when machine learning systems generalize what is true of individuals to entire groups that the individual is or is not a part of.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Implicit bias: It happens when machine learning systems are based on data that is created on personal experience which does not necessarily apply broadly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Bias", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We still have a long way to go before machine learning bias is completely eliminated. With the increased usage of machine learning systems in sensitive domains such as banking, criminal justice, and healthcare, we should aim to create algorithms that reduce bias in machine learning systems. Collaboration between human skills and machine learning is required to solve the problem of bias in machine learning. It will help us in the detection and mitigation of biases by figuring out how machine learning systems make predictions and what data aspects it uses to make judgments. This will help us understand whether the elements influencing the choice are biased.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mitigating Bias", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In this study, we use a methodology and data for bias mitigation in Inuktitut, as described in the following section. To analyse and mitigate bias in word embeddings, multiple sets of data (e.g. pairs of sentences, lists of gendered words, and combinations of sentences from different categories) are required. Two algorithms are used to measure bias in embeddings, which are applicable to traditional embeddings. Then we demonstrate how we mitigate bias in either type of embedding and examine how well the bias mitigation works on downstream tasks. Furthermore, because this study is based on Inuktitut embeddings, the data used is from the Nunavut Hansard Inuktitut-English Parallel Corpus 3.0 (Joanis et al., 2020) as for English.", |
|
"cite_spans": [ |
|
{ |
|
"start": 697, |
|
"end": 718, |
|
"text": "(Joanis et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Mitigation for Inuktitut", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This method, proposed by Caliskan et al. (2017) , helps to measure human bias in data presented as texts. It is similar to the Implicit Association Test (IAT) proposed by (Greenwald et al., 1998) . The similarity of IAT and WEAT consists of using two lists of target words and two lists of attribute words. The first pair of lists represents the terms we want to compare and the second pair of lists represents the categories in which we suspect bias could exist (Mulsa and Spanakis, 2020) . By using WEAT, Caliskan et al. (2017) defined ten tests to assess the bias in several areas (Mulsa and Spanakis, 2020) . In our study we converted the WEAT lists of words used in the tests to Inuktitut and modified them such that terms in these lists are only related with the appropriate category. Some of the modifications correspond to the different linguistic characteristics of the language and the lack of meaningful translations of certain words in the data. Some other changes are due to the language's various linguistic peculiarities and the lack of relevant translations for particular words in the data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 47, |
|
"text": "Caliskan et al. (2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 195, |
|
"text": "(Greenwald et al., 1998)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 489, |
|
"text": "Spanakis, 2020)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 529, |
|
"text": "Caliskan et al. (2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 610, |
|
"text": "Spanakis, 2020)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding Association Test (WEAT)", |
|
"sec_num": null |
|
}, |
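
{

"text": "As a concrete illustration, the WEAT statistic can be computed directly from the embeddings. The following is a minimal Python sketch, assuming X and Y are lists of target-word vectors (e.g. male and female names) and A and B are lists of attribute-word vectors (e.g. career and family terms), all looked up in the same embedding model:\n\nimport numpy as np\n\ndef cos(u, v):\n    return u @ v / (np.linalg.norm(u) * np.linalg.norm(v))\n\ndef assoc(w, A, B):\n    # s(w, A, B): mean similarity of w to attribute set A minus to set B\n    return np.mean([cos(w, a) for a in A]) - np.mean([cos(w, b) for b in B])\n\ndef weat_effect_size(X, Y, A, B):\n    # Cohen's-d-style effect size over the two target sets (Caliskan et al., 2017)\n    s = [assoc(w, A, B) for w in X + Y]\n    return (np.mean(s[:len(X)]) - np.mean(s[len(X):])) / np.std(s, ddof=1)\n\nA permutation test over equal-size re-partitions of the union of X and Y then gives the significance level of each test.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Embedding Association Test (WEAT)",

"sec_num": null

},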
|
{ |
|
"text": "Gonen and Goldberg (2019) provided a new metric that shows that word embeddings with reduced bias can stay grouped together even when the range across attributes and targeted words (in WEAT) is minimal. To determine the gender orientation of each word in the lexicon, the clustering accuracy test necessitates projecting the entire vocabulary into male and female terms (Mulsa and Spanakis, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 396, |
|
"text": "Spanakis, 2020)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering accuracy", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The pronouns he and she were used by Gonen and Goldberg (2019) , because they are commonly used and the only variation between them is in the gender subdomain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 62, |
|
"text": "Gonen and Goldberg (2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering accuracy", |
|
"sec_num": null |
|
}, |
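
{

"text": "A minimal sketch of this clustering test, assuming male_vecs and female_vecs already hold the embeddings of the k most male- and female-biased words (selected, for instance, by their projection onto the gender direction):\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\ndef cluster_accuracy(male_vecs, female_vecs, seed=0):\n    # Cluster the most-biased words into two groups and check how well the\n    # clusters align with the original bias labels (Gonen and Goldberg, 2019).\n    X = np.vstack([male_vecs, female_vecs])\n    y = np.array([0] * len(male_vecs) + [1] * len(female_vecs))\n    pred = KMeans(n_clusters=2, n_init=10, random_state=seed).fit_predict(X)\n    acc = (pred == y).mean()\n    return max(acc, 1 - acc)  # cluster labels are arbitrary\n\nAn accuracy close to 0.5 indicates that the two groups are no longer separable, while an accuracy close to 1.0 indicates that the bias is still encoded in the geometry.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Clustering accuracy",

"sec_num": null

},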
|
{ |
|
"text": "Inuktitut has few personal pronouns, either in first person (I, we) or second person (you) 6 ; which represents a problem in this research by adding extra meaning besides gender to the geometrical difference of the pronouns (Mulsa and Spanakis, 2020).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering accuracy", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we present the debiaising methods used in this research with an application on the Inuktitut language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Debiasing Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Hard debias (Bolukbasi et al., 2016) One of the earliest strategies used to detect and minimise bias in word embeddings was Hard Debias. Through post-processing, it removes gender bias by 6 https://uqausiit.ca/grammar-book subtracting the component linked with gender from all embeddings. It takes a set of gender-specific word pairs and computes the gender direction in the embedding space as the first principal component of difference vectors of these pairs. Furthermore, it removes gender bias by projecting biased word embeddings onto a subspace orthogonal to the assumed gender direction (Bolukbasi et al., 2016) . The gender orientation is skewed by the frequency of words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 36, |
|
"text": "(Bolukbasi et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 618, |
|
"text": "(Bolukbasi et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Debiasing Methods", |
|
"sec_num": "6.2" |
|
}, |
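
{

"text": "To make the procedure concrete, here is a simplified sketch of the neutralize step of Hard Debias, assuming pairs is a list of definitional word-vector pairs (e.g. the vectors of mother and father):\n\nimport numpy as np\n\ndef gender_direction(pairs):\n    # First principal component of the centered definitional-pair differences\n    diffs = np.array([a - b for a, b in pairs])\n    diffs -= diffs.mean(axis=0)\n    _, _, vt = np.linalg.svd(diffs, full_matrices=False)\n    return vt[0]\n\ndef neutralize(vec, g):\n    # Project vec onto the subspace orthogonal to the gender direction g\n    g = g / np.linalg.norm(g)\n    return vec - (vec @ g) * g\n\nThe full method of Bolukbasi et al. (2016) additionally equalizes the definitional pairs around the neutralized words; the sketch above only removes the gender component.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Debiasing Methods",

"sec_num": "6.2"

},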
|
{ |
|
"text": "SENT-Debias is divided into four steps: 1) identifying words with bias attributes; 2) contextualising these words into bias attribute sentences and, as a result, their sentence representations; 3) estimating the sentence representation bias subspace; and 4) debiasing general sentences by eliminating the projection onto this bias subspace . These processes are summarized in Figure 1 . Iterative NullSpace Projection (Ravfogel et al., 2020) INLP stands for Iterative Nullspace Projection, which is a method for eliminating data from neuronal representations (Figure 2 ). This algorithm is built on repeatedly training linear classifiers that predict a specific property that we want to eliminate; then projecting the representations onto their null-space. As a result, the classifiers loose sight of the target property, making it difficult to linearly divide the data based on it. While this method is relevant to a variety of applications, it was tested on bias and fairness use-cases and demonstrated that it can mitigate bias in word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 418, |
|
"end": 441, |
|
"text": "(Ravfogel et al., 2020)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 384, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 568, |
|
"text": "(Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SENT debias (Liang et al., 2020)", |
|
"sec_num": null |
|
}, |
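
{

"text": "A minimal sketch of the INLP loop, assuming X is a matrix of word vectors and y holds binary gender labels for those words; the choice of linear classifier and the number of iterations (35 here, matching the final state t = 35 shown later in Figure 3) are assumptions:\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\ndef inlp(X, y, n_iters=35):\n    # Repeatedly fit a linear gender classifier, then project the data onto\n    # the classifier's nullspace so that direction is no longer recoverable.\n    d = X.shape[1]\n    P = np.eye(d)\n    for _ in range(n_iters):\n        clf = LogisticRegression(max_iter=1000).fit(X, y)\n        w = clf.coef_ / np.linalg.norm(clf.coef_)\n        P_w = np.eye(d) - w.T @ w  # projection onto the nullspace of w\n        P = P_w @ P\n        X = X @ P_w  # P_w is symmetric\n    return P  # debiased vectors are obtained as V @ P.T",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SENT debias (Liang et al., 2020)",

"sec_num": null

},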
|
{ |
|
"text": "We conducted some experiments on gender bias mitigation in Inuktitut language. We used the Nunavut Hansard Inuktitut-English Parallel Corpus 3.0 (Joanis et al., 2020) . The statistics of the training corpus are described in Table 1 . (Ravfogel et al., 2020) . 20, 657, 477 1, 293, 348 5, 433 6, 139 English 10, 962, 904 1, 293, 348 5, 433 6, 139 We performed our experiment using word embeddings, trained on the Nunavut Hansard for Inuktitut-English. In order to pre-train the embeddings for Inuktitut, we used an Inuktitut segmenter to segmentate the words before passing it to the FastText toolkit (Bojanowski et al., 2016) . The model was trained for 40 epochs and we used 150 and 300 as the size of the dense vector to represent each token or word. In order to get terms that are more related and close to each other we used a small window of 2 which give us the maximum distance between the target word and its neighboring word. We also used an alpha value of 0.03 to preserve the strong correlation of the model after each training example is evaluated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 166, |
|
"text": "(Joanis et al., 2020)", |
|
"ref_id": null |
|
}, |
|
|
{ |
|
"start": 600, |
|
"end": 625, |
|
"text": "(Bojanowski et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 231, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data and Evaluations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Dataset #tokens #train #dev #test Inuktitut", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Evaluations", |
|
"sec_num": "7" |
|
}, |
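
{

"text": "For reproducibility, the following is a minimal training sketch using the gensim reimplementation of FastText (rather than the original C++ toolkit) with the hyperparameters described above; the corpus file name and the min_count value are assumptions:\n\nfrom gensim.models import FastText\n\n# One morpheme-segmented Inuktitut sentence per line (hypothetical file name)\nsentences = [line.split() for line in open('hansard.iu.seg', encoding='utf-8')]\n\nmodel = FastText(\n    sentences,\n    vector_size=300,  # we also trained 150-dimensional vectors\n    window=2,         # small window, to favour closely related terms\n    alpha=0.03,       # initial learning rate\n    epochs=40,\n    min_count=5,      # assumption: not reported above\n)\nmodel.wv.save('inuktitut.ft.kv')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data and Evaluations",

"sec_num": "7"

},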
|
{ |
|
"text": "We performed the WEAT test on the adapted lists of words translated to Inuktitut. Among all the traditional word embeddings, we see high effect sizes and multiple tests are significant at different levels. The results of the WEAT effect sizes on gendered related tests are shown in Table 2 where we see an overall high effect size across all the scores on the original models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 289, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data and Evaluations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results of the WEAT effect sizes on gendered related tests are shown in Table 2 where we see a high effect size on the word embeddings debiased from the original models. The results after the debiasing step shows that the bias mitigation is Because Inuktitut is a genderless language, it can be difficult to use pronouns. Therefore following (Gonen and Goldberg, 2019) , we used common names for males and females instead of specifically gendered words to indicate the male and female categories (e.g. pronouns). Three tests compare the associations of male and female names to (1) job and family-related words, (2) art words, and (3) scientific domains. We observe that, following the projection, the substantial relationship between the groups is no longer there in the three tests. Figure 3 shows projections of the 200 most female-biased and 200 male-biased words projected at t = 1, which is basically the original state, and t = 35 which is the final state after debiasing. These results represent the INLP method. The results clearly demonstrate that the classes are no longer linearly separable in the INLP method. This behavior is qualitatively different from the Sent debias and the Hard debias methods; which are shown to maintain much of the proximity between female and male-biased vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 346, |
|
"end": 372, |
|
"text": "(Gonen and Goldberg, 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 798, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data and Evaluations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We hypothesize, in this paper, that identifying the true gender orientation of word embeddings using these existing Debias approaches could be challenging. We show that the geometry of word em- beddings is influenced by word frequency. Popular and rare words, for example, cluster in various subregions of the embedding space, regardless of the fact that the words in these clusters are semantically unrelated. This may have a negative impact on the process of determining gender direction and, as a result, the efficacy of debiasing methods to debias the gender. We saw that changing the frequency of certain phrases causes large changes in the similarities between the related difference vector and other difference vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We noticed, in the context of gender bias, one disadvantage that we found out, is that all of our 3 debiasing methods, like other learning approaches, are dependent on the data that is supplied to it; and assumes that the training data is suitably large and sampled from the same distribution as the test data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "In practice, this requirement is difficult to achieve, and failing to supply properly representative training data may result in biased classifications even after it has been applied.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We further emphasize that the WEAT and clustering tests do not test for the absence of bias; rather, they test if bias exists in the test instances, but bias may also exist in non-tested cases. Even if we measure bias from a different perspective, the bias remains, indicating that more studies on bias mitigation approaches are needed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "For indigenous peoples in general, the language is directly connected to their culture and identity. Thus, it is very important for indigenous peoples of Canada, to both, speak their language and practice their culture. Inuktitut not only represents the official language of Inuits but also represents the rich culture of this community. With recent advances, NLP models represent a big opportunity for the development of tools that will further help in preserving the language with respect for the culture and realities of the indigenous people where the language takes a big part of it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human-in-the-Loop Paradigm", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Most communities in Nunavut offer Inuktitut or Inuinnaqtun for the first few years of education, and the government has vowed to develop completely bilingual students across the territory 7 . As a result, the problem remains unsolved. As a nonindigenous people with a strong academic interests in social science, linguistics and NLP, Dorais (2010) cites that gaining a better grasp of the general sociolinguistic situation in Northern Canada is the first step toward a true solution to the Inuit culture and language difficulties. It is insufficient to describe how Inuit people communicate (which is the task of linguists). We must also attempt to comprehend what they are saying and what language means to them (Dorais, 2010) . Revitalizing indigenous language should be done for, by and with indigenous communities. With the emergence of AI, especially deep learning, there is a large interest for the revitalization of indigenous languages. However, there is little interest in the field of computer science, and there are also very few or no researchers from Canada's Indigenous communities in the field of NLP.", |
|
"cite_spans": [ |
|
{ |
|
"start": 713, |
|
"end": 727, |
|
"text": "(Dorais, 2010)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human-in-the-Loop Paradigm", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "It's evident that human skills like insight and creativity be easily computerized, therefore collaborating human skills with machine learning technologies is a great approach to keep human in the loop for developing technologies for us. Before building machine learning algorithms, it's a good idea to consult with humanists and social scientists to verify that the models we create don't inherit any of the biases that people have.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human-in-the-Loop Paradigm", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Machine learning models can assist us in revealing flaws in human decision-making. So, if these models trained on current human decisions reveal bias, it will be important to have a second look from human to keep this models fair. In the case of developing machine learning technologies for indigenous communities, it is important to keep the collaboration and partnership with them; before, while and after developing tools for them. Engaging communities to develop machine learning tools is very important, not only it will make the tool more suitable and tailored to their needs but it will also give the ownership to these communities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human-in-the-Loop Paradigm", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "This paper demonstrates that gender bias exists in Inuktitut, among other biases (as probably in other languages as well). Then, by appropriately translating the data and taking into account the language's specific characteristics, we illustrated how approaches used to measure and reduce biases in English embeddings can be applied to Inuktitut embeddings. Furthermore, we investigated the influence of mitigating approaches on downstream tasks, finding a major effect in traditional embeddings, which could be regarded as favourable if the embeddings utilised guarantee a more genderneutral approach. As a future work, we plan to investigate other types of biases in Inuktitut and collaborate with the Indigenous community. Our main objective remain the revitalization and preservation of Indigenous languages of Canada, using NLP and machie learning techniques. We hope that these exploratory results will encourage researches on Indigenous and Endangered languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "https://translate.google.ca/, consulted at April 14th, 2022", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://uqausiit.ca/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://en.wikipedia.org/wiki/List_of_languages_by _type_of_grammatical_genders 4 https://linguisticmaps.tumblr.com/post/169273617313/ grammatical-gender-or-noun-class-categories-new", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Source: https://www. thecanadianencyclopedia.ca/en/article/ inuktitut", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Michael Gasser. 2011. Computational morphology and the teaching of indigenous languages. In Indigenous Languages of Latin America Actas del Primer Simposio sobre Ense\u00f1anza de Lenguas Ind\u00edgenas de Am\u00e9rica Latina, page 52.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Tackling racial bias in automated online hate detection: Towards fair and accurate detection of hateful users with geometric deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Zo", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott A", |
|
"middle": [], |
|
"last": "Hale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "EPJ Data Science", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zo Ahmed, Bertie Vidgen, and Scott A Hale. 2022. Tackling racial bias in automated online hate detec- tion: Towards fair and accurate detection of hateful users with geometric deep learning. EPJ Data Sci- ence, 11(1):8.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.04606" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2016. Enriching word vec- tors with subword information. arXiv preprint arXiv:1607.04606.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Man is to computer programmer as woman is to homemaker?", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai. 2016. Man is to computer programmer as woman is to homemaker? debiasing word embeddings.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Detecting and mitigating bias in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Res. Rep, Brookings Inst", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aylin Caliskan. 2021. Detecting and mitigating bias in natural language processing. Res. Rep, Brookings Inst., Washington, DC [Google Scholar].", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Semantics derived automatically from language corpora contain human-like biases", |
|
"authors": [ |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joanna", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bryson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Science", |
|
"volume": "356", |
|
"issue": "6334", |
|
"pages": "183--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aylin Caliskan, Joanna J Bryson, and Arvind Narayanan. 2017. Semantics derived automatically from lan- guage corpora contain human-like biases. Science, 356(6334):183-186.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Improving gender translation accuracy with filtered self-training", |
|
"authors": [ |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Kumar Choubey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Currey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prashant", |
|
"middle": [], |
|
"last": "Mathur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgiana", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.07695" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prafulla Kumar Choubey, Anna Currey, Prashant Mathur, and Georgiana Dinu. 2021. Improving gen- der translation accuracy with filtered self-training. arXiv preprint arXiv:2104.07695.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bias against research on gender bias", |
|
"authors": [ |
|
{ |
|
"first": "Aleksandra", |
|
"middle": [], |
|
"last": "Cislak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Magdalena", |
|
"middle": [], |
|
"last": "Formanowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tamar", |
|
"middle": [], |
|
"last": "Saguy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Scientometrics", |
|
"volume": "115", |
|
"issue": "1", |
|
"pages": "189--200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aleksandra Cislak, Magdalena Formanowicz, and Tamar Saguy. 2018. Bias against research on gender bias. Scientometrics, 115(1):189-200.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Amazon scraps secret ai recruiting tool that showed bias against women", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dastin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Ethics of Data and Analytics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "296--299", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Dastin. 2018. Amazon scraps secret ai recruit- ing tool that showed bias against women. In Ethics of Data and Analytics, pages 296-299. Auerbach Publications.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Decolonizing both researcher and research and its effectiveness in indigenous research", |
|
"authors": [ |
|
{ |
|
"first": "Ranjan", |
|
"middle": [], |
|
"last": "Datta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Research Ethics", |
|
"volume": "14", |
|
"issue": "2", |
|
"pages": "1--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ranjan Datta. 2018. Decolonizing both researcher and research and its effectiveness in indigenous research. Research Ethics, 14(2):1-24.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Sex discrimination in simulated employment contexts: A meta-analytic investigation", |
|
"authors": [ |
|
{ |
|
"first": "Heather", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Burke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Journal of Vocational Behavior", |
|
"volume": "56", |
|
"issue": "2", |
|
"pages": "225--248", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heather K Davison and Michael J Burke. 2000. Sex discrimination in simulated employment contexts: A meta-analytic investigation. Journal of Vocational Behavior, 56(2):225-248.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "L'\u00e9tranger aux yeux du francophone de qu\u00e9bec", |
|
"authors": [ |
|
{ |
|
"first": "Louis-Jacques", |
|
"middle": [], |
|
"last": "Dorais", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Recherches sociographiques", |
|
"volume": "31", |
|
"issue": "1", |
|
"pages": "11--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis-Jacques Dorais. 1990. L'\u00e9tranger aux yeux du francophone de qu\u00e9bec. Recherches sociographiques, 31(1):11-23.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Language of the Inuit: syntax, semantics, and society in the Arctic", |
|
"authors": [ |
|
{ |
|
"first": "Louis-Jacques", |
|
"middle": [], |
|
"last": "Dorais", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "58", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis-Jacques Dorais. 2010. Language of the Inuit: syn- tax, semantics, and society in the Arctic, volume 58. McGill-Queen's Press-MQUP.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Dual use and the ethical responsibility of scientists", |
|
"authors": [ |
|
{ |
|
"first": "Hans-J\u00f6rg", |
|
"middle": [], |
|
"last": "Ehni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Archivum immunologiae et therapiae experimentalis", |
|
"volume": "56", |
|
"issue": "3", |
|
"pages": "147--152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hans-J\u00f6rg Ehni. 2008. Dual use and the ethical re- sponsibility of scientists. Archivum immunologiae et therapiae experimentalis, 56(3):147-152.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A survey on bias in deep nlp", |
|
"authors": [ |
|
{ |
|
"first": "Ismael", |
|
"middle": [], |
|
"last": "Garrido-Mu\u00f1oz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arturo", |
|
"middle": [], |
|
"last": "Montejo-R\u00e1ez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Mart\u00ednez-Santiago", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L Alfonso Ure\u00f1a-L\u00f3pez", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Applied Sciences", |
|
"volume": "11", |
|
"issue": "7", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ismael Garrido-Mu\u00f1oz, Arturo Montejo-R\u00e1ez, Fer- nando Mart\u00ednez-Santiago, and L Alfonso Ure\u00f1a- L\u00f3pez. 2021. A survey on bias in deep nlp. Applied Sciences, 11(7):3184.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them", |
|
"authors": [ |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Measuring individual differences in implicit cognition: the implicit association test", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Greenwald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debbie", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "McGhee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [ |
|
"LK" |
|
], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Journal of personality and social psychology", |
|
"volume": "74", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony G Greenwald, Debbie E McGhee, and Jor- dan LK Schwartz. 1998. Measuring individual differ- ences in implicit cognition: the implicit association test. Journal of personality and social psychology, 74(6):1464.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Moving beyond the native-speaker bias in the analysis of variable gender marking. Frontiers in Communication", |
|
"authors": [ |
|
{ |
|
"first": "Aarnes", |
|
"middle": [], |
|
"last": "Gudmestad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Edmonds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Metzger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aarnes Gudmestad, Amanda Edmonds, and Thomas Metzger. 2021. Moving beyond the native-speaker bias in the analysis of variable gender marking. Fron- tiers in Communication, page 165.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "De-colonizing gender in indigenous language revitalization efforts. Western Papers in Linguistics/Cahiers linguistiques de Western", |
|
"authors": [ |
|
{ |
|
"first": "Jenna", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Hassan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenna N Hassan. 2016. De-colonizing gender in in- digenous language revitalization efforts. Western Pa- pers in Linguistics/Cahiers linguistiques de Western, 1(2):4.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Women also snowboard: Overcoming bias in captioning models", |
|
"authors": [ |
|
{ |
|
"first": "Lisa", |
|
"middle": [ |
|
"Anne" |
|
], |
|
"last": "Hendricks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaylee", |
|
"middle": [], |
|
"last": "Burns", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Saenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Darrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the European Conference on Computer Vision (ECCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "771--787", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lisa Anne Hendricks, Kaylee Burns, Kate Saenko, Trevor Darrell, and Anna Rohrbach. 2018. Women also snowboard: Overcoming bias in captioning mod- els. In Proceedings of the European Conference on Computer Vision (ECCV), pages 771-787.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Five sources of bias in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrimai", |
|
"middle": [], |
|
"last": "Prabhumoy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Language and Linguistics Compass", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Hovy and Shrimai Prabhumoy. 2021. Five sources of bias in natural language processing. Language and Linguistics Compass.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Five sources of bias in natural language processing. Language and Linguistics Compass", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrimai", |
|
"middle": [], |
|
"last": "Prabhumoye", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/lnc3.12432" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Hovy and Shrimai Prabhumoye. 2021. Five sources of bias in natural language processing. Lan- guage and Linguistics Compass, 15(8):e12432.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The social impact of natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shannon", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Spruit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "591--598", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Hovy and Shannon L Spruit. 2016. The social im- pact of natural language processing. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 591-598.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Chi-kiu Lo, and Darlene Stewart. 2020. The nunavut hansard inuktitut-english parallel corpus 3.0 with preliminary machine translation results", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Joanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Knowles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Larkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Littell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-kiu", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darlene", |
|
"middle": [], |
|
"last": "Stewart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2562--2572", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Joanis, Rebecca Knowles, Roland Kuhn, Samuel Larkin, Patrick Littell, Chi-kiu Lo, and Darlene Stew- art. 2020. The nunavut hansard inuktitut-english par- allel corpus 3.0 with preliminary machine translation results. In Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020), pages 2562--2572.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "On the psychology of prediction", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Kahneman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amos", |
|
"middle": [], |
|
"last": "Tversky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1973, |
|
"venue": "Psychological review", |
|
"volume": "80", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Kahneman and Amos Tversky. 1973. On the psychology of prediction. Psychological review, 80(4):237.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Revitalization of indigenous languages through pre-processing and neural machine translation: The case of inuktitut", |
|
"authors": [ |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Tan" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fatiha", |
|
"middle": [], |
|
"last": "Sadat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4661--4666", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ngoc Tan Le and Fatiha Sadat. 2020. Revitalization of indigenous languages through pre-processing and neural machine translation: The case of inuktitut. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4661-4666.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Towards a lowresource neural machine translation for indigenous languages in canada", |
|
"authors": [ |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Tan" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fatiha", |
|
"middle": [], |
|
"last": "Sadat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Journal TAL, special issue on Language Diversity", |
|
"volume": "62", |
|
"issue": "", |
|
"pages": "39--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ngoc Tan Le and Fatiha Sadat. 2022. Towards a low- resource neural machine translation for indigenous languages in canada. Journal TAL, special issue on Language Diversity, 62:3:39-63.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Colonialism, gender and the family in north america: For a gendered analysis of indigenous struggles", |
|
"authors": [ |
|
{ |
|
"first": "Darcy", |
|
"middle": [], |
|
"last": "Leigh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Studies in Ethnicity and Nationalism", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "70--88", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.1754-9469.2009.01029.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Darcy Leigh. 2009. Colonialism, gender and the family in north america: For a gendered analysis of indige- nous struggles. Studies in Ethnicity and Nationalism, 9:70 -88.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Gender stereotypes are reflected in the distributional structure of 25 languages", |
|
"authors": [ |
|
{ |
|
"first": "Molly", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "Lupyan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Nature human behaviour", |
|
"volume": "4", |
|
"issue": "10", |
|
"pages": "1021--1028", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Molly Lewis and Gary Lupyan. 2020. Gender stereo- types are reflected in the distributional structure of 25 languages. Nature human behaviour, 4(10):1021- 1028.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Towards debiasing sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irene", |
|
"middle": [ |
|
"Mengze" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao", |
|
"middle": [ |
|
"Chong" |
|
], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.08100" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Pu Liang, Irene Mengze Li, Emily Zheng, Yao Chong Lim, Ruslan Salakhutdinov, and Louis- Philippe Morency. 2020. Towards debiasing sentence representations. arXiv preprint arXiv:2007.08100.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Indigenous language technologies in canada: Assessment, challenges, and successes", |
|
"authors": [ |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Littell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Kazantseva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [], |
|
"last": "Pine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antti", |
|
"middle": [], |
|
"last": "Arppe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Cox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Odile", |
|
"middle": [], |
|
"last": "Junker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2620--2632", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick Littell, Anna Kazantseva, Roland Kuhn, Aidan Pine, Antti Arppe, Christopher Cox, and Marie-Odile Junker. 2018. Indigenous language technologies in canada: Assessment, challenges, and successes. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2620-2632.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Basic Siglit Inuvialuit Eskimo Grammar", |
|
"authors": [ |
|
{ |
|
"first": "Ronald", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronald Lowe. 1985. Basic Siglit Inuvialuit Eskimo Grammar, volume 6. Inuvik, NWT: Committee for Original Peoples Entitlement.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "When fit is fundamental: performance evaluations and promotions of upper-level female and male managers", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Lyness", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madeline", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Heilman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Journal of Applied Psychology", |
|
"volume": "91", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen S Lyness and Madeline E Heilman. 2006. When fit is fundamental: performance evaluations and pro- motions of upper-level female and male managers. Journal of Applied Psychology, 91(4):777.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Addressing challenges of machine translation of inuit languages", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Micher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey C Micher. 2018. Addressing challenges of ma- chine translation of inuit languages. Technical re- port, US Army Research Laboratory Adelphi United States.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Morphological complexity and language contact in languages indigenous to north america", |
|
"authors": [ |
|
{ |
|
"first": "Marianne", |
|
"middle": [], |
|
"last": "Mithun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Linguistic Discovery", |
|
"volume": "13", |
|
"issue": "2", |
|
"pages": "37--59", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marianne Mithun. 2015. Morphological complexity and language contact in languages indigenous to north america. Linguistic Discovery, 13(2):37-59.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Evaluating bias in dutch word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [ |
|
"Alejandro" |
|
], |
|
"last": "Ch\u00e1vez Mulsa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerasimos", |
|
"middle": [], |
|
"last": "Spanakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2011.00244" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Alejandro Ch\u00e1vez Mulsa and Gerasimos Spanakis. 2020. Evaluating bias in dutch word em- beddings. arXiv preprint arXiv:2011.00244.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Bias in data-driven artificial intelligence systems-an introductory survey", |
|
"authors": [ |
|
{ |
|
"first": "Eirini", |
|
"middle": [], |
|
"last": "Ntoutsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavlos", |
|
"middle": [], |
|
"last": "Fafalios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ujwal", |
|
"middle": [], |
|
"last": "Gadiraju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasileios", |
|
"middle": [], |
|
"last": "Iosifidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Nejdl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria-Esther", |
|
"middle": [], |
|
"last": "Vidal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvatore", |
|
"middle": [], |
|
"last": "Ruggieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franco", |
|
"middle": [], |
|
"last": "Turini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Symeon", |
|
"middle": [], |
|
"last": "Papadopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanouil", |
|
"middle": [], |
|
"last": "Krasanakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Data Mining and Knowledge Discovery", |
|
"volume": "10", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eirini Ntoutsi, Pavlos Fafalios, Ujwal Gadiraju, Vasileios Iosifidis, Wolfgang Nejdl, Maria-Esther Vidal, Salvatore Ruggieri, Franco Turini, Symeon Papadopoulos, Emmanouil Krasanakis, et al. 2020. Bias in data-driven artificial intelligence systems-an introductory survey. Wiley Interdisciplinary Re- views: Data Mining and Knowledge Discovery, 10(3):e1356.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Null it out: Guarding protected attributes by iterative nullspace projection", |
|
"authors": [ |
|
{ |
|
"first": "Shauli", |
|
"middle": [], |
|
"last": "Ravfogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanai", |
|
"middle": [], |
|
"last": "Elazar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hila", |
|
"middle": [], |
|
"last": "Gonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Twiton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7237--7256", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.647" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shauli Ravfogel, Yanai Elazar, Hila Gonen, Michael Twiton, and Yoav Goldberg. 2020. Null it out: Guard- ing protected attributes by iterative nullspace projec- tion. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7237-7256, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Gender bias in coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "8--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Scientific research must take gender into account", |
|
"authors": [ |
|
{ |
|
"first": "Londa", |
|
"middle": [], |
|
"last": "Schiebinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Nature", |
|
"volume": "507", |
|
"issue": "7490", |
|
"pages": "9--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Londa Schiebinger. 2014. Scientific research must take gender into account. Nature, 507(7490):9-9.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "2020. 'thy algorithm shalt not bear false witness': An evaluation of multiclass debiasing methods on word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Thalea", |
|
"middle": [], |
|
"last": "Schlender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerasimos", |
|
"middle": [], |
|
"last": "Spanakis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Benelux Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thalea Schlender and Gerasimos Spanakis. 2020. 'thy algorithm shalt not bear false witness': An evalua- tion of multiclass debiasing methods on word embed- dings. In Benelux Conference on Artificial Intelli- gence, pages 141-156. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Ai bias: Definition, types, examples, and debiasing strategies", |
|
"authors": [ |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Shashkina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victoria Shashkina. 2022. Ai bias: Defini- tion, types, examples, and debiasing strate- gies. https://itrexgroup.com/blog/ai-bias-definition- types-examples-debiasing-strategies/header, (1):1.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "A survey on gender bias in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Karolina", |
|
"middle": [], |
|
"last": "Stanczak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Augenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2112.14168" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karolina Stanczak and Isabelle Augenstein. 2021. A survey on gender bias in natural language processing. arXiv preprint arXiv:2112.14168.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Mitigating gender bias in natural language processing: Literature review", |
|
"authors": [ |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Gaut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shirlyn", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuxin", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mai", |
|
"middle": [], |
|
"last": "Elsherief", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diba", |
|
"middle": [], |
|
"last": "Mirza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Belding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08976" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tony Sun, Andrew Gaut, Shirlyn Tang, Yuxin Huang, Mai ElSherief, Jieyu Zhao, Diba Mirza, Eliza- beth Belding, Kai-Wei Chang, and William Yang Wang. 2019. Mitigating gender bias in natural lan- guage processing: Literature review. arXiv preprint arXiv:1906.08976.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "What are the biases in my word embedding?", |
|
"authors": [ |
|
{ |
|
"first": "Nathaniel", |
|
"middle": [], |
|
"last": "Swinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "De-Arteaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neil", |
|
"middle": [ |
|
"Thomas" |
|
], |
|
"last": "Heffernan", |
|
"suffix": "IV" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"DM" |
|
], |
|
"last": "Leiserson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam Tauman", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "305--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathaniel Swinger, Maria De-Arteaga, Neil Thomas Heffernan IV, Mark DM Leiserson, and Adam Tau- man Kalai. 2019. What are the biases in my word embedding? In Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society, pages 305- 311.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Gender and dialect bias in youtube's automatic captions", |
|
"authors": [ |
|
{ |
|
"first": "Rachael", |
|
"middle": [], |
|
"last": "Tatman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the first ACL workshop on ethics in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "53--59", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachael Tatman. 2017. Gender and dialect bias in youtube's automatic captions. In Proceedings of the first ACL workshop on ethics in natural language processing, pages 53-59.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Inuit gender parity and why it was not accepted in the nunavut legislature. \u00c9tudes/Inuit/Studies", |
|
"authors": [ |
|
{ |
|
"first": "Laakkuluk", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Williamson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "51--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laakkuluk J. Williamson. 2006. Inuit gender parity and why it was not accepted in the nunavut legislature. \u00c9tudes/Inuit/Studies, 30(1):51-68.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Gender bias in multilingual embeddings and crosslingual transfer", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subhabrata", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saghar", |
|
"middle": [], |
|
"last": "Hosseini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"Hassan" |
|
], |
|
"last": "Awadallah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.00699" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Subhabrata Mukherjee, Saghar Hosseini, Kai-Wei Chang, and Ahmed Hassan Awadallah. 2020. Gender bias in multilingual embeddings and cross- lingual transfer. arXiv preprint arXiv:2005.00699.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Men also like shopping: Reducing gender bias amplification using corpus-level constraints", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1707.09457" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Ordonez, and Kai-Wei Chang. 2017. Men also like shopping: Reducing gender bias amplifica- tion using corpus-level constraints. arXiv preprint arXiv:1707.09457.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "SENT Debias Algorithm (Liang et al., 2020)." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "INLP Algorithm" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Example of biased clusters from original to debiased states, using t-distributed stochastic neighbor embedding (t-SNE)" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td colspan=\"3\">: Fasttext WEAT results, with significance of p-value, for three methods such as Sent debias, INLP, and Hard debias. Bold values are better.</td></tr><tr><td colspan=\"3\">effective in every model. An example of the list of</td></tr><tr><td colspan=\"3\">words used is illustrated below in Table 3.</td></tr><tr><td/><td colspan=\"2\">WEAT words list example</td></tr><tr><td/><td>Category</td><td>Inuktitut</td></tr><tr><td>0</td><td>family</td><td>angajuqqaaq</td></tr><tr><td>1</td><td>prof</td><td>executive</td></tr><tr><td>2</td><td>prof</td><td>ilisaiji</td></tr><tr><td>3</td><td>male names</td><td>jaan</td></tr><tr><td>4</td><td>female</td><td>maata</td></tr><tr><td/><td>names</td><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Example of WEAT words list", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |