|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T05:10:36.921973Z" |
|
}, |
|
"title": "Towards a Multi-Entity Aspect-Based Sentiment Analysis for Characterizing Directed Social Regard in Online Messaging", |
|
"authors": [ |
|
{ |
|
"first": "Joan", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sonja", |
|
"middle": [], |
|
"last": "Schmer-Galunder", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Magnusson", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ruta", |
|
"middle": [], |
|
"last": "Wheelock", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Gottlieb", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Gomez", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Online messaging is dynamic, influential, and highly contextual, and a single post may contain contrasting sentiments towards multiple entities, such as dehumanizing one actor while empathizing with another in the same message. These complexities are important to capture for understanding the systematic abuse voiced within an online community, or for determining whether individuals are advocating for abuse, opposing abuse, or simply reporting abuse. In this work, we describe a formulation of directed social regard (DSR) as a problem of multi-entity aspect-based sentiment analysis (ME-ABSA), which models the degree of intensity of multiple sentiments that are associated with entities described by a text document. Our DSR schema is informed by Bandura's psychosocial theory of moral disengagement and by recent work in ABSA. We present a dataset of over 2,900 posts and sentences, comprising over 24,000 entities annotated for DSR over nine psychosocial dimensions by three annotators. We present a novel transformer-based ME-ABSA model for DSR, achieving favorable preliminary results on this dataset.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Online messaging is dynamic, influential, and highly contextual, and a single post may contain contrasting sentiments towards multiple entities, such as dehumanizing one actor while empathizing with another in the same message. These complexities are important to capture for understanding the systematic abuse voiced within an online community, or for determining whether individuals are advocating for abuse, opposing abuse, or simply reporting abuse. In this work, we describe a formulation of directed social regard (DSR) as a problem of multi-entity aspect-based sentiment analysis (ME-ABSA), which models the degree of intensity of multiple sentiments that are associated with entities described by a text document. Our DSR schema is informed by Bandura's psychosocial theory of moral disengagement and by recent work in ABSA. We present a dataset of over 2,900 posts and sentences, comprising over 24,000 entities annotated for DSR over nine psychosocial dimensions by three annotators. We present a novel transformer-based ME-ABSA model for DSR, achieving favorable preliminary results on this dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The social media landscape is a complex, dynamic information environment where actors express advocacy, opposition, empathy, dehumanization, and various moralistic signals, with the intent-or sometimes the side-effect-of influencing others. A single message may also express multiple sentiments in one sentence, e.g., opposing one political candidate and endorsing another, or blaming one party for harming another, or dehumanizing one party and empathizing with another.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The complexity of multiple sentiments-which may comprise multiple strategies of influence-in a single message means that classifying an entire tweet's sentiment (Da Silva et al., 2014) , or even quantifying it (Gao and Sebastiani, 2016) , along a single dimension, is both at too high a granularity (i.e., we want to assess the author's perspective on multiple topics) and at too few dimensions (i.e., we want to assess the author's perspective along multiple dimensions).", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 184, |
|
"text": "(Da Silva et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 236, |
|
"text": "(Gao and Sebastiani, 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Aspect-based sentiment analysis (ABSA) (Yang et al., 2018) , allowing multiple dimensions of sentiment on a message, gets us part-way to a solution. Multi-entity ABSA (ME-ABSA) (Tao and Fang, 2020) gets us further in this direction by classifying along multiple dimensions across entities, but these models are frequently expressed as classification problems (e.g., positive, neutral, and negative predictions), and we desire a finer-grained numerical approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 58, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 197, |
|
"text": "(Tao and Fang, 2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the present work, we present a novel multientity transformer-based ABSA regression implementation of directed social regard (DSR), the prediction of social attitudes directed toward various actors and topics mentioned in the text. Social attitudes are modelled along nine continously-valued sentiment aspects: advocate, oppose, dehumanization, empathy, violent, condemn, justified, responsible, and harmed. Masked language modelling methods are utilized to support sets of aspects associated with each unique entity type. In the present work, DSR is computed for each character (i.e., human individual, human group, or ideology) in a message and each event that harms characters within a message. Also in the present work, the DSR dimensions are informed in part by Bandura's psychosocial theory of moral disengagement (Bandura, 1999 (Bandura, , 2016 , which we describe below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 822, |
|
"end": 836, |
|
"text": "(Bandura, 1999", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 853, |
|
"text": "(Bandura, , 2016", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To implement and validate our approach, three labelers rated nine dimensions of social regard for each character and event in a dataset of Englishlanguage social media posts sourced from curated Twitter datasets. To model DSR, we desitned a transformer-based regression architecture designed specifically for fine-grained sentiment analysis of \"I vomit on you and all your lost sheep. The fght is not over and will never be. RESISTANCE!\" \"I\" (morally evaluable agent) \"you\" (morally evaluable agent) \"vomit on\" (adverse event) responsible impacted \"your lost sheep\" (morally evaluable agent) impacted \"The fght\" (adverse event) Figure 1 : NLP output from \"I vomit on you and your lost sheep. The fight is not over and never will be.\" adapted from a Kaggle social media dataset. multiple entities.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 628, |
|
"end": 636, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We next describe the psychosocial theory of moral disengagement. We then describe our approach and empirical results, closing with a discussion of limitations and future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "People have the capacity for compassion and cruelty toward others-and both at the same timedepending on their moral values and on whom they include and exclude in their category of humanity (Bandura, 1999 (Bandura, , 2016 . These are matters of moral disengagement, the psychosocial mechanisms of selectively disengaging self-sanctions from inhumane or detrimental conduct.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 204, |
|
"text": "(Bandura, 1999", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 221, |
|
"text": "(Bandura, , 2016", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moral Disengagement", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Evidence of moral disengagement is present in modern hate speech: social media contains calls to violence against outsiders (Kennedy et al., 2018; Hoover et al., 2020) ; online forums dehumanize girls and women (Ging, 2019; Hoffman et al., 2020) ; and the manifestos of violent actors justify their actions by dehumanizing and blaming others (Peters et al., 2019) . We have evidence that hate speech with these indicators increases prejudice through desensitization (Soral et al., 2018) -and that the frequency of this language is related to the frequency of violent acts in the world (Olteanu et al., 2018) -so understanding moral disengagement has real-world importance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 146, |
|
"text": "(Kennedy et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 167, |
|
"text": "Hoover et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 223, |
|
"text": "(Ging, 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 245, |
|
"text": "Hoffman et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 363, |
|
"text": "(Peters et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 486, |
|
"text": "(Soral et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 607, |
|
"text": "(Olteanu et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Moral Disengagement", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "We describe our knowlege graph and attibute schema, sources of textual data, annotation process, and our architecture for representing and scoring attributes of social regard.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our DSR schema for a single social media post includes (1) a simple knowledge graph representation adapted from previous work in social media NLP (withheld for review), and (2) nine numerical intensity ratings on said characters and events to capture the directed social regard of the author, which is the primary focus of this work. An example of the system's output for a public Kaggle dataset tweet is shown in Figure 1 . This was not part of our training dataset, so this is a novel machine prediction. We use this example to describe our schema.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 414, |
|
"end": 422, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DSR Schema", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The knowledge graph contains two types of entities, each comprising a span (i.e., contiguous span of tokens) in the text: (1) characters, also known as morally evaluable agents, comprising the author, human individuals, ethnicities, organizations, religions, ideologies, and geopolitical entities, and (2) adverse events that may cause harm or be morally questionable as described by the author. In Figure 1 , the characters are \"I,\" \"you,\" and \"your lost sheep,\" since the latter was inferred to refer to people in this context. The events include \"vomit on\" and \"the fight.\"", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 407, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DSR Schema", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The DSR values capture sentiment according to dimensions of moral disengagement described above, in addition to sentiment analysis, as expressed by the author of the text. For each dimension we describe whether it was motivated by Bandura's (1999 Bandura's ( , 2016 moral disengagement theory B or by sentiment analysis S and whether it applies to characters c or events e or both.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 246, |
|
"text": "Bandura's (1999", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 265, |
|
"text": "Bandura's ( , 2016", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DSR Schema", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. Advocate: Endorsement or support of an entity by the author. S,c,e 2. Oppose: Opposition or adversarial attitude to an entity by the author. S,c,e 3. Dehumanization: Actor described with nonhuman or lesser-than-human attributes, diminishing their agency or humanity. B,c 4. Empathy: Actor described with empathy, compassion, humanity. B,c 5. Violent: Event described as having literal or metaphorical physical or sexual violence. B,e 6. Condemn: Entity morally condemned. B,c,e 7. Justified: Entity morally justified. B,c,e 8. Responsible (for harm): Actor described as causing harm to others or to themselves. B,c 9. Harmed: Actor described as being harmed by themselves or others. B,c Each of the Bandura-motivated dimensions captures a factor of moral disengagement: diminishing or accentuating humanity indicates whether the author might include the target in their circle of humanity; descriptions of violence and responsibility for harm are indicators of blame or advocacy for violence; mention of harmed individuals (including oneself) is an indicator of victimization and potential justification of subsequent action; and moral condemnation and justification indicate a moral standpoint for adverse events.", |
|
"cite_spans": [ |
|
{ |
|
"start": 686, |
|
"end": 689, |
|
"text": "B,c", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DSR Schema", |
|
"sec_num": "2.1" |
|
}, |
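To make the schema concrete, the nine aspects and their applicability can be written down in a machine-readable form. The sketch below is our own illustrative encoding of the list above (the dictionary and field names are not from the paper); "B" and "S" mark the motivating framework, and "c" and "e" mark applicability to characters and events:

```python
# Illustrative encoding of the nine DSR aspects listed above (names are ours).
# motivation: "B" = Bandura's moral disengagement theory, "S" = sentiment analysis
# applies_to: "c" = characters (morally evaluable agents), "e" = adverse events
DSR_ASPECTS = {
    "advocate":       {"motivation": "S", "applies_to": {"c", "e"}},
    "oppose":         {"motivation": "S", "applies_to": {"c", "e"}},
    "dehumanization": {"motivation": "B", "applies_to": {"c"}},
    "empathy":        {"motivation": "B", "applies_to": {"c"}},
    "violent":        {"motivation": "B", "applies_to": {"e"}},
    "condemn":        {"motivation": "B", "applies_to": {"c", "e"}},
    "justified":      {"motivation": "B", "applies_to": {"c", "e"}},
    "responsible":    {"motivation": "B", "applies_to": {"c"}},
    "harmed":         {"motivation": "B", "applies_to": {"c"}},
}

def applicable_aspects(entity_type: str) -> list[str]:
    """Return the aspect names that apply to an entity of type "c" or "e"."""
    return [name for name, spec in DSR_ASPECTS.items() if entity_type in spec["applies_to"]]
```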
|
{ |
|
"text": "The heat-map in Figure 1 shows the nine moral dimensions across all of the characters and events from this example, where \"your lost sheep\" are the ony ones dehumanized.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 24, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DSR Schema", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Documents were selected from text posts known to contain online abuse or hate speech, including the Moral Foundations Twitter Corpus (Hoover et al., 2020) ; the Gab Hate Corpus (Kennedy et al., 2018) ; How ISIS Uses Twitter dataset from Kaggle (Khuram, 2017) ; and Manosphere community text posts (Ribeiro et al., 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 154, |
|
"text": "(Hoover et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 199, |
|
"text": "(Kennedy et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 258, |
|
"text": "(Khuram, 2017)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Annotation Methodology", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "To optimize for content eligible for fine-grained sentiment analysis, documents were considered only if they met three criteria: (1) written in 280 or fewer characters; (2) written in English words or emoticons; and (3) contained more content than user mentions, URLs, or links to images.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Annotation Methodology", |
|
"sec_num": "2.2" |
|
}, |
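The three eligibility criteria translate naturally into a document filter. The sketch below is a hypothetical implementation, not the authors' code; in particular, the `looks_english` helper is a crude placeholder, since the paper does not state how language was detected:

```python
import re

# Matches user mentions, URLs, and Twitter image links.
MENTION_OR_URL = re.compile(r"@\w+|https?://\S+|pic\.twitter\.com/\S+")

def looks_english(text: str) -> bool:
    """Crude placeholder check: only ASCII letters allowed; emoticons/emoji pass."""
    return all(ord(ch) < 128 or not ch.isalpha() for ch in text)

def is_eligible(post: str) -> bool:
    """Apply the three dataset criteria described above."""
    if len(post) > 280:                         # (1) 280 or fewer characters
        return False
    if not looks_english(post):                 # (2) English words or emoticons
        return False
    remainder = MENTION_OR_URL.sub("", post).strip()
    return len(remainder) > 0                   # (3) content beyond mentions/URLs/image links
```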
|
{ |
|
"text": "Three English speakers were hired on the Prolific survey platform (Palan and Schitter, 2018) to score entities for DSR attributes. Out of our collected documents, 2,907 documents that met our criteria were annotated by at least two of our human annotators. These annotations contain a total of 24,425 unique entities. Annotators were asked to rate entities for each sentiment using a scale ranging from zero (not present) to five (most intense).", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 92, |
|
"text": "(Palan and Schitter, 2018)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Annotation Methodology", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "To measure inter-annotator agreement between our three human raters, we compute Krippendorff's \u03b1 (Krippendorff, 2011) for each of the nine aspects, as shown in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 117, |
|
"text": "(Krippendorff, 2011)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 167, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset and Annotation Methodology", |
|
"sec_num": "2.2" |
|
}, |
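For reference, per-aspect agreement of this kind can be computed with the open-source `krippendorff` Python package; the choice of package and the interval level of measurement for the 0-5 ratings are our assumptions, not details given in the paper:

```python
import numpy as np
import krippendorff  # pip install krippendorff -- an assumed implementation choice

def aspect_alpha(ratings: np.ndarray) -> float:
    """Krippendorff's alpha for one aspect.

    `ratings` has shape (n_annotators, n_entities), with np.nan marking entities
    that an annotator did not rate; the 0-5 intensities are treated as interval data.
    """
    return krippendorff.alpha(reliability_data=ratings, level_of_measurement="interval")

# Toy example: three annotators rating four entities on one aspect.
toy = np.array([[0, 4, 5, np.nan],
                [1, 4, 4, 2],
                [0, 5, 4, 2]], dtype=float)
print(round(aspect_alpha(toy), 3))
```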
|
{ |
|
"text": "For drawing tentative conclusions, Krippendorff recommends using variables with reliabilities above \u03b1 = 0.667 (Krippendorff, 2018) , which are achieved by our aspects violent and oppose. Both these aspects were labeled with intensity 4-5 more frequently compared to other aspects. For training and testing purposes, we identified annotations with high agreement as those where annotators falling within two standard units of each other, and with a maximum difference of two intensity units. These selection criteria limit disagreements while maintaining moderate-intensity aspects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 130, |
|
"text": "(Krippendorff, 2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Annotation Methodology", |
|
"sec_num": "2.2" |
|
}, |
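Our reading of this selection rule, interpreting the spread constraint as a maximum pairwise difference of two intensity units among the available ratings, can be sketched as follows (function and parameter names are ours):

```python
import numpy as np

def high_agreement_mask(ratings: np.ndarray, max_spread: float = 2.0) -> np.ndarray:
    """Boolean mask over entities whose ratings agree closely enough to keep.

    `ratings` has shape (n_annotators, n_entities), with np.nan for missing ratings.
    An entity is kept when the spread (max - min) of its available ratings is at
    most `max_spread` intensity units.
    """
    spread = np.nanmax(ratings, axis=0) - np.nanmin(ratings, axis=0)
    return spread <= max_spread
```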
|
{ |
|
"text": "We used two transformer-based NLP models: (1) an entity-and relation-extractor based on the SpERT architecture (Eberts and Ulges, 2020) to extract characters and entities comprising one or more continuous tokens in the text and (2) a novel ABSA-based model that scores each character or entity for the applicable DSR dimensions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 135, |
|
"text": "(Eberts and Ulges, 2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Importantly, for training and testing the DSR performance, we only use the human-annotated characters and events; we do not train or test the DSR model on machine-predicted entities, but this is how we envision applying the model on novel texts. We focus on the ABSA/DSR in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "ABSA/DSR Architecture. The input for the DSR ABSA model is a text with entities annotated with (1) token start/end indices and (2) entity type (i.e., character or event). These may be either manually annotated (as we have done in our evaluation) or automatically predicted from a entity recognition system, e.g., (Eberts and Ulges, 2020; Friedman et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 337, |
|
"text": "(Eberts and Ulges, 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 360, |
|
"text": "Friedman et al., 2021)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "As shown in Figure 2 , the text document is processed by a pre-trained BERT (Devlin et al., 2019) embedding layer using wordpiece tokenization. An interaction layer creates a fixed-dimentional pooled matrix, which contains a concatenation of BERTencoded document and its entities represented as masked token sequences, the collection of masks for each entity type, and the lengths of each token span. These separate sequences are concatenated together as a matrix to support batch evaluation along multiple entities by the linear aspect classifiers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 97, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "This matrix representation feeds into a separate linear layers for each DSR aspect. Which entity gets graded by each linear layer is determined by the type of entity (e.g., as shown in Figure 1 , an event entity does not have a dehumanized DSR aspect). This is implemented when multiplying the concatenated input matrix by the entity mask, which creates a matrix with nonzero inputs at the same indices as the linear layers it is eligible to be scored by. A softmax activation function calcuates the prediction associated with each aspect.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 193, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "2.3" |
|
}, |
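The per-aspect heads and the entity-type masking can be sketched as below. The applicability table mirrors the schema in Section 2.1; using a plain scalar regression head per aspect (rather than the softmax formulation mentioned above) is our simplification:

```python
import torch
import torch.nn as nn

ASPECTS = ["advocate", "oppose", "dehumanization", "empathy", "violent",
           "condemn", "justified", "responsible", "harmed"]
# Aspect applicability by entity type, mirroring the schema in Section 2.1.
APPLICABLE = {
    "character": {"advocate", "oppose", "dehumanization", "empathy",
                  "condemn", "justified", "responsible", "harmed"},
    "event": {"advocate", "oppose", "violent", "condemn", "justified"},
}

class AspectHeads(nn.Module):
    """One linear head per DSR aspect; inapplicable aspects are zeroed out by entity type."""

    def __init__(self, input_dim: int = 2 * 768 + 1):
        super().__init__()
        self.heads = nn.ModuleDict({a: nn.Linear(input_dim, 1) for a in ASPECTS})

    def forward(self, pooled: torch.Tensor, entity_types: list[str]) -> torch.Tensor:
        # Entity-type mask: 1 where an aspect applies to the entity, 0 otherwise.
        mask = torch.tensor([[1.0 if a in APPLICABLE[t] else 0.0 for a in ASPECTS]
                             for t in entity_types])
        # Score every entity with every head, then mask out inapplicable aspects.
        scores = torch.cat([self.heads[a](pooled) for a in ASPECTS], dim=-1)  # (n_entities, 9)
        return scores * mask
```

In use, the masked scores would be compared against the 0-5 gold intensities with a regression loss restricted to the applicable aspects.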
|
{ |
|
"text": "We evaluated the DSR/ABSA architecture on the above dataset with the above DSR schema. We used human-labeled characters and events as inputs for this experiment in order to focus the evaluation on the DSR rather than the span extraction, but we report that on a 90/10 train/test split, the entity extractor scored F1 scores of 0.95 and 0.73 for extracting characters and events, allowing determiner mismatch, e.g., an event \"the airstrikes\" is allowed to match to \"airstrikes.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use the pre-trained, case-sensitive BERTbase model for fine-tuning (12 transformer blocks, 768-size hidden layer, 12 attention heads, and 110M total parameters). We fine-tuned with dropout probability 0.1 for 3 epochs, and we trained with learning rate 2e-5. Train, evaluation, and test splits were generated from our social media dataset using by creating 60/20/20 splits. Results. Results are shown in Table 2 , with lowest error (i.e., RMSE) on violent, dehumanized, and empathy dimensions. As mentioned above, violent was one of the more intensely-rated aspects and had highest \u03b1 score, so we believe this contributed to successful learning. The aspect dehumanized-and its dual, empathy-are central to Bandura's theory of moral disengagement. The average RMSE across aspects was 1.00 of a 5-point intensity scale, and all R 2 results directly correlated, explaining between 11-29% of variance in annotators' intensity scores across aspects. We regard these results as preliminary but encouraging for continued work in this domain.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 414, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment", |
|
"sec_num": "3" |
|
}, |
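The reported hyperparameters and metrics can be reproduced with standard tooling. The sketch below collects the stated fine-tuning settings into a config (the model name string and the split implementation are assumptions) and computes per-aspect RMSE and R^2 with scikit-learn on toy numbers, not the paper's data:

```python
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

# Fine-tuning settings stated in the text; the model name string is an assumption.
CONFIG = {
    "pretrained_model": "bert-base-cased",   # case-sensitive BERT-base
    "dropout": 0.1,
    "epochs": 3,
    "learning_rate": 2e-5,
    "splits": (0.6, 0.2, 0.2),               # train / evaluation / test
}

def aspect_metrics(y_true: np.ndarray, y_pred: np.ndarray) -> tuple[float, float]:
    """RMSE (error on the intensity scale) and R^2 (variance explained) for one aspect."""
    rmse = float(np.sqrt(mean_squared_error(y_true, y_pred)))
    return rmse, float(r2_score(y_true, y_pred))

# Toy example, not the paper's data:
truth = np.array([0.0, 4.0, 5.0, 2.0])
pred = np.array([1.0, 3.5, 4.0, 2.5])
print(aspect_metrics(truth, pred))
```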
|
{ |
|
"text": "We have described an approach to encoding the directed social regard (DSR) of authors toward events and actors in their posts, informed by Bandura's (1999 Bandura's ( , 2016 psychosocial theory of moral disengagement. This helps characterize abuse and harm in online messaging, including the advocacy and opposition to said abuse and harm, by highlighting entities that are associated with aspects associated with moral disengagement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 154, |
|
"text": "Bandura's (1999", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 173, |
|
"text": "Bandura's ( , 2016", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our transformer-based approach uses a multientity aspect-based sentiment analysis (ME-ABSA) treatment to represent and predict DSR across nine psychosocial dimensions. We provide empirical evidence that transformer-based architectures can detect relevant actors and events and then predict human DSR ratings within reasonable preliminary error bounds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Limitations and Future Work. One factor likely reducing the performance of our DSR model is the imbalanced representation of sentiment labels in our dataset. There is a scarcity of examples in our dataset of entities that are associated with some sentiments, particularly moderate to positive sentiments labels and sentiments with low to moderate degrees of intensity. As shown in Table 1 , annotators used aspect labels empathy and justified less frequently than other sentiment aspects in our schema, and was not able to reach a reliably high degree of agreement when annotating these sentiments. To improve the capability of our directed social regard model for applications outside of the domain of online abuse and hate, it would be beneficial to learn from examples that contain a more diverse selection of sentiments expressed, such as examples associated with positive to neutral sentiments as well as examples that contain a balanced range of low, moderate, and high degrees of intensity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 388, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion and Future Work", |
|
"sec_num": "4" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This material is based upon work supported by the Defense Advanced Research Projects Agency (DARPA) under Contract No. HR001121C0186 and Contract No. FA86650-19-6017. Any opinions, findings and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the Defense Advanced Research Projects Agency (DARPA).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Moral disengagement in the perpetration of inhumanities", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Bandura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Personality and social psychology review", |
|
"volume": "3", |
|
"issue": "3", |
|
"pages": "193--209", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Bandura. 1999. Moral disengagement in the perpetration of inhumanities. Personality and social psychology review, 3(3):193-209.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Moral disengagement: How people do harm and live with themselves", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Bandura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Bandura. 2016. Moral disengagement: How people do harm and live with themselves. Worth publishers.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tweet sentiment analysis with classifier ensembles. Decision support systems", |
|
"authors": [ |
|
{ |
|
"first": "Nadia Ff Da", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduardo", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Hruschka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Estevam R Hruschka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "66", |
|
"issue": "", |
|
"pages": "170--179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nadia FF Da Silva, Eduardo R Hruschka, and Este- vam R Hruschka Jr. 2014. Tweet sentiment analysis with classifier ensembles. Decision support systems, 66:170-179.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of NAACL-HLT 2019, pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Span-based joint entity and relation extraction with transformer pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Eberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Ulges", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "24th European Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Eberts and Adrian Ulges. 2020. Span-based joint entity and relation extraction with transformer pre-training. 24th European Conference on Artifi- cial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "From unstructured text to causal knowledge graphs: A transformerbased approach", |
|
"authors": [ |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Magnusson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasanth", |
|
"middle": [], |
|
"last": "Sarathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonja", |
|
"middle": [], |
|
"last": "Schmer-Galunder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Advances in Cognitive Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott Friedman, Ian Magnusson, Vasanth Sarathy, and Sonja Schmer-Galunder. 2021. From unstructured text to causal knowledge graphs: A transformer- based approach. In Proceedings of the 2021 Con- ference on Advances in Cognitive Systems.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "From classification to quantification in tweet sentiment analysis. Social Network Analysis and Mining", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "1--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Gao and Fabrizio Sebastiani. 2016. From classifi- cation to quantification in tweet sentiment analysis. Social Network Analysis and Mining, 6(1):1-22.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Alphas, betas, and incels: Theorizing the masculinities of the manosphere", |
|
"authors": [ |
|
{ |
|
"first": "Debbie", |
|
"middle": [], |
|
"last": "Ging", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Men and Masculinities", |
|
"volume": "22", |
|
"issue": "4", |
|
"pages": "638--657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Debbie Ging. 2019. Alphas, betas, and incels: Theoriz- ing the masculinities of the manosphere. Men and Masculinities, 22(4):638-657.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Assessing the threat of incel violence", |
|
"authors": [ |
|
{ |
|
"first": "Bruce", |
|
"middle": [], |
|
"last": "Hoffman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Ware", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ezra", |
|
"middle": [], |
|
"last": "Shapiro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Studies in Conflict & Terrorism", |
|
"volume": "43", |
|
"issue": "7", |
|
"pages": "565--587", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bruce Hoffman, Jacob Ware, and Ezra Shapiro. 2020. Assessing the threat of incel violence. Studies in Conflict & Terrorism, 43(7):565-587.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Moral foundations twitter corpus: A collection of 35k tweets annotated for moral sentiment", |
|
"authors": [ |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Hoover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gwenyth", |
|
"middle": [], |
|
"last": "Portillo-Wightman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leigh", |
|
"middle": [], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shreya", |
|
"middle": [], |
|
"last": "Havaldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aida", |
|
"middle": [ |
|
"Mostafazadeh" |
|
], |
|
"last": "Davani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Kennedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Atari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zahra", |
|
"middle": [], |
|
"last": "Kamel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madelyn", |
|
"middle": [], |
|
"last": "Mendlen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Social Psychological and Personality Science", |
|
"volume": "11", |
|
"issue": "8", |
|
"pages": "1057--1071", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joe Hoover, Gwenyth Portillo-Wightman, Leigh Yeh, Shreya Havaldar, Aida Mostafazadeh Davani, Ying Lin, Brendan Kennedy, Mohammad Atari, Zahra Kamel, Madelyn Mendlen, et al. 2020. Moral foun- dations twitter corpus: A collection of 35k tweets annotated for moral sentiment. Social Psychologi- cal and Personality Science, 11(8):1057-1071.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The gab hate corpus: A collection of 27k posts annotated for hate speech", |
|
"authors": [ |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Kennedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Atari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aida", |
|
"middle": [ |
|
"Mostafazadeh" |
|
], |
|
"last": "Davani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leigh", |
|
"middle": [], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Omrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yehsong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kris", |
|
"middle": [], |
|
"last": "Coombs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shreya", |
|
"middle": [], |
|
"last": "Havaldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gwenyth", |
|
"middle": [], |
|
"last": "Portillo-Wightman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elaine", |
|
"middle": [], |
|
"last": "Gonzalez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brendan Kennedy, Mohammad Atari, Aida Mostafazadeh Davani, Leigh Yeh, Ali Omrani, Yehsong Kim, Kris Coombs, Shreya Haval- dar, Gwenyth Portillo-Wightman, Elaine Gonzalez, et al. 2018. The gab hate corpus: A collection of 27k posts annotated for hate speech.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "How isis uses twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zaman", |
|
"middle": [], |
|
"last": "Khuram", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zaman Khuram. 2017. How isis uses twitter.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Computing krippendorff's alpha-reliability", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 2011. Computing krippendorff's alpha-reliability.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Content analysis: An introduction to its methodology", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 2018. Content analysis: An intro- duction to its methodology. Sage publications.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The effect of extremist violence on hateful speech online", |
|
"authors": [ |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Olteanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Castillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Boy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kush", |
|
"middle": [], |
|
"last": "Varshney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandra Olteanu, Carlos Castillo, Jeremy Boy, and Kush Varshney. 2018. The effect of extremist vio- lence on hateful speech online. In Proceedings of the International AAAI Conference on Web and So- cial Media.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Prolific. ac-a subject pool for online experiments", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Palan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Schitter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Behavioral and Experimental Finance", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "22--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Palan and Christian Schitter. 2018. Prolific. ac-a subject pool for online experiments. Journal of Behavioral and Experimental Finance, 17:22-27.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "How the El Paso Killer Echoed the Incendiary Words of Conservative Media Stars", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Grynbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rumsey", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Peters, Michael Grynbaum, Keith Collins, Rich Harris, and Rumsey Taylor. 2019. How the El Paso Killer Echoed the Incendiary Words of Conservative Media Stars.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Summer Long, Stephanie Greenberg, and Savvas Zannettou. 2020. From pick-up artists to incels: a data-driven sketch of the manosphere", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Manoel Horta Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Blackburn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiliano", |
|
"middle": [], |
|
"last": "Bradlyn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "De Cristofaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stringhini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.07600" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manoel Horta Ribeiro, Jeremy Blackburn, Barry Brad- lyn, Emiliano De Cristofaro, Gianluca Stringhini, Summer Long, Stephanie Greenberg, and Savvas Zannettou. 2020. From pick-up artists to incels: a data-driven sketch of the manosphere. arXiv preprint arXiv:2001.07600.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Exposure to hate speech increases prejudice through desensitization", |
|
"authors": [ |
|
{ |
|
"first": "Wiktor", |
|
"middle": [], |
|
"last": "Soral", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micha\u0142", |
|
"middle": [], |
|
"last": "Bilewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miko\u0142aj", |
|
"middle": [], |
|
"last": "Winiewski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Aggressive behavior", |
|
"volume": "44", |
|
"issue": "2", |
|
"pages": "136--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wiktor Soral, Micha\u0142 Bilewicz, and Miko\u0142aj Winiewski. 2018. Exposure to hate speech increases preju- dice through desensitization. Aggressive behavior, 44(2):136-146.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Toward multi-label sentiment analysis: a transfer learning based approach", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Big Data", |
|
"volume": "7", |
|
"issue": "1", |
|
"pages": "1--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Tao and Xing Fang. 2020. Toward multi-label sen- timent analysis: a transfer learning based approach. Journal of Big Data, 7(1):1-26.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Multi-entity aspect-based sentiment analysis with context, entity and aspect memory", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Runqi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chongjun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyuan", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Yang, Runqi Yang, Chongjun Wang, and Junyuan Xie. 2018. Multi-entity aspect-based sentiment anal- ysis with context, entity and aspect memory. In Pro- ceedings of the AAAI Conference on Artificial Intel- ligence, volume 32.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "An overview of the ABSA architecture optimized for the DSR task.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"content": "<table><tr><td>: Nonzero label usage comparision across our three annotators (A1-3) across 24,245 entities and nine aspect labels, along a five point intensity scale. Also includes Krippendorff's \u03b1.</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "ABSA/DSR model performance: R 2 measures correlation between human and machine ratings and RMSE measures prediction error. Averaged RMSE is 1.00 out of five units of intensity.", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |