|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:30:55.309368Z" |
|
}, |
|
"title": "Suicide Risk Prediction by Tracking Self-Harm Aspects in Tweets: NUS-IDS at the CLPsych 2021 Shared Task", |
|
"authors": [ |
|
{ |
|
"first": "Sujatha", |
|
"middle": [], |
|
"last": "Das Gollapalli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Guilherme", |
|
"middle": [ |
|
"Augusto" |
|
], |
|
"last": "Zagatti", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "See-Kiong", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We describe our system for identifying users at-risk for suicide based on their tweets developed for the CLPsych 2021 Shared Task. Based on research in mental health studies linking self-harm tendencies with suicide, in our system, we attempt to characterize selfharm aspects expressed in user tweets over a period of time. To this end, we design SHT M , a Self-Harm Topic Model that combines Latent Dirichlet Allocation with a selfharm dictionary for modeling daily tweets of users. Next, differences in moods and topics over time are captured as features to train a deep learning model for suicide prediction.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We describe our system for identifying users at-risk for suicide based on their tweets developed for the CLPsych 2021 Shared Task. Based on research in mental health studies linking self-harm tendencies with suicide, in our system, we attempt to characterize selfharm aspects expressed in user tweets over a period of time. To this end, we design SHT M , a Self-Harm Topic Model that combines Latent Dirichlet Allocation with a selfharm dictionary for modeling daily tweets of users. Next, differences in moods and topics over time are captured as features to train a deep learning model for suicide prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Social media portals provide outlets for people to express their thoughts and emotions, and researchers have noted that user writings on social media contain signs and symptoms of various mental disorders (Coppersmith et al., 2014) . Due to this reason, automated methods for identifying individuals \"at risk\" for various conditions such as depression, suicide, and addiction based on their online activity is an upcoming, recent research topic (Niederhoffer et al., 2019; Losada et al., 2020a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 231, |
|
"text": "(Coppersmith et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 472, |
|
"text": "(Niederhoffer et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 494, |
|
"text": "Losada et al., 2020a)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on suicide, a leading cause of mortality among younger population (Patton et al., 2009) and address the problem of identifying individuals at-risk for suicide as part of the CLPsych 2021 Shared Task. In particular, we make use of the well-established link between self-harm tendencies and suicide (Kidger et al., 2012; Losada et al., 2020b) and study the expression of self-harm moods in user tweets. Our contributions are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 111, |
|
"text": "(Patton et al., 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 342, |
|
"text": "(Kidger et al., 2012;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 364, |
|
"text": "Losada et al., 2020b)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose SHT M , a topic model for capturing the self-harm aspects expressed in user writings. SHT M uses self-harm dictionaries in a novel way within the Latent Dirichlet Allocation model to represent the topical as well as self-harm content expressed in a given text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "SHT M extracts self-harm word groups that may be indicative of various mental health issues seen in at-risk persons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Next, we characterize mood changes captured in the writings using SHT M and show that the topic and mood profiles of the \"control\" and \"at risk\" individuals over time are different. We use this information to design features for our deep learning based classification model and test them on the tweet datasets from the CLPsych 2021 Shared Task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Probabilistic topic models are widely-used in text mining and NLP research for their ability to extract latent topics from a given document collection in an unsupervised manner (Koltcov et al., 2014; Lin and He, 2009; Wei and Croft, 2006) . In particular, topic models based on Latent Dirichlet Allocation (Blei et al., 2003) were effectively used to characterize temporal topical trends and topical evolution (Bolelli et al., 2009; Lau et al., 2012; . We describe our extension to the well-known LDA model for handling self-harm content changes through SHTM our Topic Model for Self-Harm content. The document generative process in standard LDA is based on the assumption that a given document can be viewed as a mixture of latent topics. To model self-harm aspects expressed in text, we make use of a dictionary comprising of expert-compiled words commonly-used by individuals engaging in self-harm activities (D SH ) and \"split\" the document text based on whether a word is found in D SH or V (the rest of the vocabulary). That is, we assume that the presence of a word from D SH indicates a Self-Harm Mood (SHM) expressed by the user whereas other words express \"regular\" topics. K and E refer to the number of topics and self-harm aspects, respectively, while z and u refer to their corresponding latent variables for a particular tweet, respectively. The words sampled from the latent SHM and topics distributions are represented by m and w respectively. \u03b1t, \u03b1e, \u03b2t, \u03b2e are Dirichlet hyperparameters. (Heinrich, 2005) Based on the above premise, each word in the text generation process of SHT M is either conditioned on a latent topic t, or a latent self-harm mood e, and a given document is a mixture of topics \u03b8 t (as in regular LDA) as well as a mixture of SHMs \u03b8 e (which includes \"NoSH or no self-harm\" mood). The plate diagram for SHT M is shown in Figure 1 . 
We refer the interested reader to Heinrich (2005) for the derivations for the sampling equations due to space constraints.", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 199, |
|
"text": "(Koltcov et al., 2014;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 217, |
|
"text": "Lin and He, 2009;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 238, |
|
"text": "Wei and Croft, 2006)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 325, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 432, |
|
"text": "(Bolelli et al., 2009;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 450, |
|
"text": "Lau et al., 2012;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1507, |
|
"end": 1523, |
|
"text": "(Heinrich, 2005)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1862, |
|
"end": 1870, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SHT M : Our Topic Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In SHT M , the topic assignment process (operating on all words in V) is exactly the same as in standard LDA, whereas the self-harm mood assignments though similar, work only on words from D SH . Furthermore, input texts with no words from D SH are directly assigned the \"NoSH\" mood. We posit that via this distinction of words based on their presence in D SH , we can capture both the topical content and self-harm moods of a text directly via SHT M 's topical and mood dimensions. That is, similar to how a given document can be represented using its topic proportion vector (in a reduced dimension) in standard LDA, using SHT M , each user-generated text can be represented using a topic proportion vector as well as an SHM proportion vector and these vectors can be used to track changes along time when temporal information is available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SHT M : Our Topic Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "That is, let . . . w t\u22121 , w t , w t+1 . . . represent a sequence of writings for a given user. To track the change in mood for the user at timepoint t, given a context window w, we use the averaged SHM vectors for w t\u2212w . . . w t\u22121 and compute the difference between this average vector and the SHM vector for w t using measures such as cosine distance or KL divergence (Hall et al., 2008; Gollapalli and Li, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 371, |
|
"end": 390, |
|
"text": "(Hall et al., 2008;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 415, |
|
"text": "Gollapalli and Li, 2015)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SHT M : Our Topic Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We used a deep learning model based on Long Short-Term Memory (LSTM) shown in Figure 2 . Since both LSTMs and term feature vectors are effective for text classification problems (Aggarwal and Zhai, 2012; Pouyanfar et al., 2018) , our model aims to combine the benefits of both via a twopart setup in which the output from the LSTM which captures the sequence information present in textual content is combined with aggregate features such as normalized term frequencies and SHT Mbased features. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 203, |
|
"text": "(Aggarwal and Zhai, 2012;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 227, |
|
"text": "Pouyanfar et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 86, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Our LSTM Classification Model", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Data: The dataset for the CLPsych 2021 Shared Task contains Twitter posts of users who attempted or committed suicide, and control individuals collected from OurDataHelps (ODH). 1 The competition involves two subtasks: \"Prediction of a suicide attempt 30 days prior\" (ODH30) and \"Prediction of suicide attempt 6 months prior\" (ODH182). We refer the reader to the overview paper of the CLPsych 2021 Shared Task (Macavaney et al., 2021) for further details on the data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 434, |
|
"text": "(Macavaney et al., 2021)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Briefly, the datasets for both tasks are fairly balanced containing roughly equal number of positive and control users as well as tweets. For the ODH182 and ODH30 subtasks, the training datasets comprise 162 and 109 users and 13K and 2K tweets, respectively. The test datasets comprise about 20 percent of the number of users available for training. The Shared Task also provides access to two other datasets: (1) a Practice Dataset (PD) comprising of tweets of users with '#depression' or similar hashtag 2 and (2) the University of Maryland (UMD) Suicidality Dataset based on Reddit posts (Zirikly et al., 2019; Shing et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 591, |
|
"end": 613, |
|
"text": "(Zirikly et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 633, |
|
"text": "Shing et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As part of the task setup, all data was only accessible within a secure computing environment known as the UMD/National Opinion Research Center (NORC) Mental Health Data Enclave and all experiments were to be performed in this space. We refer the reader to MacAvaney, et al 2021for details of the Enclave and the challenges involved in performing experiments in such environments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Implementation Details: SHT M was implemented in Java by extending the topic model code provided in the Mallet toolkit (McCallum, 2002) . Default settings in Mallet were used for hyperparameter initialization and probability sampling. We tested three options including (a) All ODH data including the data provided for ODH30 and ODH182 tasks (ODH-only), (b) All ODH data and UMD data (ODH+UMD), and (c) All ODH and tweets from the Practice Dataset (ODH+PD). We used only data from relevant subreddits (picked manually based on term filters 'suicide', 'self-harm' and 'depression') for the UMD collection. Based on the word clusters extracted by SHT M for each SHM on a few choices of number of topics and SHM, we set the values of the number of topics and SHMs, respectively to (20, 5) for ODH-only, (15, 5) for ODH+UMD and (50, 10) for ODH+PD. SHT M assignments from these runs were used for computing features for classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 135, |
|
"text": "(McCallum, 2002)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We employed standard text mining normalization steps to process the tweets. That is, all stopwords, punctuation and tokens starting with \"@\", referring to URLs, and non-alphanumeric ones were removed and all content was lowercased. After employing a term frequency threshold of 3, the vocabulary size (V) is approximately 13K. For our self-harm word dictionary (D SH ), we curated words from the sources for Pyscholinguistic features used by Trifan et al (2020) to assemble a small list of 50 phrases corresponding to self-harm activities. Words in D SH include \"self-image\" \"bruises\", \"numbing\", and \"trauma\". 3 Incorporating Context and Sampling: In our tasks, while predictions need to be made at userlevel, we are given a sequence of time-stamped tweets with each user. Rather than create a single training instance clubbing all tweets available for a user, or creating a separate instance per tweet, we choose a middle ground based on the notion that from a practical standpoint, a classifier should be able to handle partial data availability rather than the entire 30 or 182 day periods. We enable this by creating multiple instances per user based on a context window parameter (w).", |
|
"cite_spans": [ |
|
{ |
|
"start": 442, |
|
"end": 461, |
|
"text": "Trifan et al (2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 611, |
|
"end": 612, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let T t represents the set of all tweets posted on date t. For each user, we select all tweets generated from T t\u2212w+1 to T t inclusive to create a training instance. Starting from the last tweet posted by the user, we slide the window n times to obtain a maximum of n overlapping instances for each user. In this way, we can sample user tweets along different timepoints for training our models. 4 Classifier Settings: We experimented with emotion-enriched word embeddings (Agrawal et al., 2018) and GloVE (Pennington et al., 2014) word embeddings for representing text within LSTMs. The number of LSTM units were set to 50 with the sequence length set to 1000. The output from LSTMs and aggregate features were concatenated and input to a subsequent dense layer of size 100. The dropout rate was set to 0.2 and we used the Adam optimizer for training all models with cross-entropy loss. 5", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 397, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 495, |
|
"text": "(Agrawal et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 531, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We briefly summarize our results in this section. Note that we have several tunable parameters: number of topics/SHM, clusters for SHT M model, learning model parameters such as LSTM and layer dimensions, as well as the n and w parameters that affect number of training instances added per user and the context window for aggregating tweets. We tune these parameters using validation experiments. That is, the training data is randomly split into 80/20% train/validation portions of the data using three different random seeds. All parameter Table 1 : Performance of our classification is compared against the baseline model for the two subtasks of CLPsych 2021.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 542, |
|
"end": 549, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "SHT M was trained on ODH-only with 20 topics and 5 SHMs for all our selected models, except for * which was trained on ODH + PD with 50 topics and 10 SHMs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "choices are based on the averaged F1 scores from these three runs. The best models did not use large values for the context or sliding window. Rather, when instances for a user are extracted in reverse chronological order, values of w and n in the range 3-10 closest to the last available date for a user perform the best for classification on both the subtasks. This observation indicates that the content generated closest to the attempt date is highly informative in identifying a user's suicidality risk.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Word embeddings from EWE performed better than GloVE, and topic/SHM assignments from ODH-only corpus performed the best among our the three choices. The word clusters extracted from this corpus for the self-harm aspects are shown below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "SHMID Top-words 1 death shame bipolar relationships disgust bruises emotional obesity 2 cut emotional panic doubt disorder hopeless 3 suicide stress sadness relationships bleak helpless 4 anxiety worry depression accident friendships scratch guilt", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To analyze the differences in mood and topic profiles among the two groups of users ('positive' and 'control'), we examined the mean and variance of the KL-divergence between the SHM vector representing tweets on date t and the average SHM vector of tweets from the past w-1 dates available for a user. We proceeded similarly for the corresponding topic vectors. For the positive class, we observe higher mean and variance for the KL-divergence of SHM vectors. In contrast, we observe a lower mean and variance for the KL-divergence in topics. Taken together, these trends suggest that there is expressive variation in SHM within the positive class which might explain the high false positive rate and warrants further investigation in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mood and Topic Profiles:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Classification Performance: Table 1 illustrates the validation and test performances using our best configurations compared against the competition provided baseline model based on Logistic Regression. For the competition, the suggested measures include F1 (the standard measure combining precision and recall), F2 (which values recall twice as much as precision), true and false positive rates (TP and FP) as well as AUC which measures how the predictions are ranked.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 35, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Mood and Topic Profiles:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our model does significantly well in the validation runs on all measures for the ODH30 dataset but has significantly higher false positive rate and significantly lower AUC score for ODH182. For test performance, our model obtains a significantly higher F2 and true positive rates over the baseline model but is unable to beat the baseline on the F1 and AUC measures. We observe a significantly high number of false positives in all test runs with our model. The baseline performs surprisingly well on the test set as compared to training, while our model shows a higher degree of consistency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mood and Topic Profiles:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Due to criticality of this prediction task, we would like to err on the side of caution. However, a high false positive rate is not useful in a practical prediction system. In future work, we aim to fully investigate this dataset specifically for reducing the FP rate, improving the overall prediction performance using other deep learning models and augmenting with related datasets (Losada et al., 2020a) . We would also like to further investigate the capacity of SHM to act as a discriminant in other learning models (SVMs were not as succesful as LSTMs in our experiments).", |
|
"cite_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 406, |
|
"text": "(Losada et al., 2020a)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mood and Topic Profiles:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We presented SHT M , our topic model for representing self-harm aspects expressed in social media texts. We used features based on self-harm mood changes and topic changes in tweets over time within a deep learning model to predict suicidal users. To the best of our knowledge, we are the first to employ topic models for studying mood characterization in context of suicide risk.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Several topic models were proposed in previous works for incorporating label information and improving prediction tasks (Blei and McAuliffe, 2007; Ramage et al., 2009; Nguyen et al., 2013; Ren et al., 2020) . In future, we aim to incorporate emotion lexicons (Mohammad and Turney, 2010) into these models and suitably extend them to characterize temporal mood trends (Bolelli et al., 2009) of users with mental health issues such as depression, PTSD, and suicide .", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 146, |
|
"text": "(Blei and McAuliffe, 2007;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 167, |
|
"text": "Ramage et al., 2009;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 188, |
|
"text": "Nguyen et al., 2013;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 206, |
|
"text": "Ren et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 286, |
|
"text": "(Mohammad and Turney, 2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 389, |
|
"text": "(Bolelli et al., 2009)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Secure access to the shared task dataset was provided with IRB approval under University of Maryland, College Park protocol 1642625.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethics Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://ourdatahelps.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/swcwang/ depression-detection", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/NUS-IDS/ clpsych21-sharedtask 4 All available sliding windows are considered during prediction and we predict a user as \"positive\" if any instance associated with the user is classified as positive.5 Classification models were implemented using Python 3.9.1 and associated Torch libraries provided on the Enclave.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors are particularly grateful to the users who donated data to the OurDataHelps project without whom this work would not be possible, to Qntfy for supporting the OurDataHelps project and making the data available, to NORC for creating and administering the secure infrastructure, and to Amazon for supporting this research with computational resources on AWS. This research/project was supported by the National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No: AISG-GC-2019-001). Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of National Research Foundation, Singapore.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A Survey of Text Classification Algorithms", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Charu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengxiang", |
|
"middle": [], |
|
"last": "Aggarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charu C. Aggarwal and ChengXiang Zhai. 2012. A Survey of Text Classification Algorithms.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Learning emotion-enriched word representations", |
|
"authors": [ |
|
{ |
|
"first": "Ameeta", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aijun", |
|
"middle": [], |
|
"last": "An", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manos", |
|
"middle": [], |
|
"last": "Papagelis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "950--961", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ameeta Agrawal, Aijun An, and Manos Papagelis. 2018. Learning emotion-enriched word representa- tions. In Proceedings of the 27th International Con- ference on Computational Linguistics, pages 950- 961.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Supervised topic models", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mcauliffe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--128", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei and Jon D. McAuliffe. 2007. Supervised topic models. In NIPS, page 121-128.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent dirichlet allocation. J. Mach. Learn. Res., 3:993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Topic and trend detection in text collections using latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "Levent", |
|
"middle": [], |
|
"last": "Bolelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u015eeyda", |
|
"middle": [], |
|
"last": "Ertekin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C Lee", |
|
"middle": [], |
|
"last": "Giles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "European conference on information retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "776--780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Levent Bolelli,\u015eeyda Ertekin, and C Lee Giles. 2009. Topic and trend detection in text collections using latent dirichlet allocation. In European conference on information retrieval, pages 776-780. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "What about mood swings: Identifying depression on twitter with temporal measures of emotions", |
|
"authors": [ |
|
{ |
|
"first": "Xuetong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Sykora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Jackson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suzanne", |
|
"middle": [], |
|
"last": "Elayan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuetong Chen, Martin D. Sykora, Thomas W. Jackson, and Suzanne Elayan. 2018. What about mood swings: Identifying depression on twitter with temporal measures of emotions. In WWW.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Quantifying mental health signals in Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Glen", |
|
"middle": [], |
|
"last": "Coppersmith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Harman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Workshop on Computational Linguistics and Clinical Psychology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Glen Coppersmith, Mark Dredze, and Craig Harman. 2014. Quantifying mental health signals in Twitter. In Proceedings of the Workshop on Computational Linguistics and Clinical Psychology, pages 51-60.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "EMNLP versus ACL: Analyzing NLP research over time", |
|
"authors": [ |
|
{ |
|
"first": "Sujatha", |
|
"middle": [], |
|
"last": "Das Gollapalli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoli", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2002--2006", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sujatha Das Gollapalli and Xiaoli Li. 2015. EMNLP versus ACL: Analyzing NLP research over time. In EMNLP, pages 2002-2006.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Studying the history of ideas using topic models", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Hall, Daniel Jurafsky, and Christopher D. Manning. 2008. Studying the history of ideas using topic models. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Detecting topic evolution in scientific literature: How can citations help", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Pei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baojun", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasenjit", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lee", |
|
"middle": [], |
|
"last": "Giles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "957--966", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi He, Bi Chen, Jian Pei, Baojun Qiu, Prasenjit Mitra, and Lee Giles. 2009. Detecting topic evolution in scientific literature: How can citations help? In CIKM, page 957-966.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Parameter estimation for text analysis", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Heinrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Heinrich. 2005. Parameter estimation for text analysis. Web: http://www.arbylon.net/publications/text-est.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adolescent self-harm and suicidal thoughts in the alspac cohort: a self-report survey in england", |
|
"authors": [ |
|
{ |
|
"first": "Judi", |
|
"middle": [], |
|
"last": "Kidger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Heron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glyn", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Gunnell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "BMC Psychiatry", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judi Kidger, Jon Heron, Glyn Lewis, Jonathan Evans, and David Gunnell. 2012. Adolescent self-harm and suicidal thoughts in the alspac cohort: a self-report survey in england. In BMC Psychiatry 12, 69.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Latent dirichlet allocation: Stability and applications to studies of user-generated content", |
|
"authors": [ |
|
{ |
|
"first": "Sergei", |
|
"middle": [], |
|
"last": "Koltcov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olessia", |
|
"middle": [], |
|
"last": "Koltsova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Nikolenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 ACM Conference on Web Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--165", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergei Koltcov, Olessia Koltsova, and Sergey Nikolenko. 2014. Latent dirichlet allocation: Stability and applications to studies of user-generated content. In Proceedings of the 2014 ACM Conference on Web Science, page 161-165.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "On-line trend analysis with topic models: #twitter trends detection topic model online", |
|
"authors": [ |
|
{ |
|
"first": "Jey Han", |
|
"middle": [], |
|
"last": "Lau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nigel", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of COLING 2012", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1519--1534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jey Han Lau, Nigel Collier, and Timothy Baldwin. 2012. On-line trend analysis with topic models: #twitter trends detection topic model online. In Proceedings of COLING 2012, pages 1519-1534.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Joint sentiment/topic model for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Chenghua", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "375--384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenghua Lin and Yulan He. 2009. Joint sentiment/topic model for sentiment analysis. In CIKM, page 375-384.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Self-harm and depression challenges", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Losada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Crestani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [], |
|
"last": "Parapar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Information Retrieval", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David E. Losada, Fabio Crestani, and Javier Parapar. 2020a. erisk 2020: Self-harm and depression challenges. In Advances in Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Overview of erisk 2020: Early risk prediction on the internet", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Losada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Crestani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [], |
|
"last": "Parapar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Experimental IR Meets Multilinguality, Multimodality, and Interaction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David E. Losada, Fabio Crestani, and Javier Parapar. 2020b. Overview of erisk 2020: Early risk prediction on the internet. In Experimental IR Meets Multilinguality, Multimodality, and Interaction, Lecture Notes in Computer Science.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Community-level research on suicidality prediction in a secure environment: Overview of the CLPsych 2021 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Macavaney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anjali", |
|
"middle": [], |
|
"last": "Mittu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glen", |
|
"middle": [], |
|
"last": "Coppersmith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Leintz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "CLPsych", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sean Macavaney, Anjali Mittu, Glen Coppersmith, Jeff Leintz, and Philip Resnik. 2021. Community-level research on suicidality prediction in a secure environment: Overview of the CLPsych 2021 shared task. In CLPsych.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Mallet: A machine learning for language toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Andrew Kachites", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Kachites McCallum. 2002. Mallet: A machine learning for language toolkit. Http://mallet.cs.umass.edu.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Emotions evoked by common words and phrases: Using Mechanical Turk to create an emotion lexicon", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif Mohammad and Peter Turney. 2010. Emotions evoked by common words and phrases: Using Mechanical Turk to create an emotion lexicon. In Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Lexical and hierarchical topic regression", |
|
"authors": [ |
|
{ |
|
"first": "Viet-An", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Ying", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viet-An Nguyen, Jordan L Ying, and Philip Resnik. 2013. Lexical and hierarchical topic regression. In Advances in Neural Information Processing Systems, volume 26.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology", |
|
"authors": [ |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Niederhoffer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristy", |
|
"middle": [], |
|
"last": "Hollingshead", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Loveys", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kate Niederhoffer, Kristy Hollingshead, Philip Resnik, Rebecca Resnik, and Kate Loveys, editors. 2019. Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Global patterns of mortality in young people: a systematic analysis of population health data", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Patton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Coffey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sawyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Viner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Haller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Bose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Vos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Mathers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Lancet", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G.C. Patton, C. Coffey, S.M. Sawyer, Viner R.M., Haller D.M., Bose K., Vos T., Ferguson J., and Mathers C.D. 2009. Global patterns of mortality in young people: a systematic analysis of population health data. In Lancet.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A survey on deep learning: Algorithms, techniques, and applications", |
|
"authors": [ |
|
{ |
|
"first": "Samira", |
|
"middle": [], |
|
"last": "Pouyanfar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saad", |
|
"middle": [], |
|
"last": "Sadiq", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yilin", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haiman", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yudong", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [ |
|
"Presa" |
|
], |
|
"last": "Reyes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mei-Ling", |
|
"middle": [], |
|
"last": "Shyu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shu-Ching", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Iyengar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samira Pouyanfar, Saad Sadiq, Yilin Yan, Haiman Tian, Yudong Tao, Maria Presa Reyes, Mei-Ling Shyu, Shu-Ching Chen, and S. S. Iyengar. 2018. A survey on deep learning: Algorithms, techniques, and applications. ACM Comput. Surv.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Labeled LDA: A supervised topic model for credit attribution in multi-labeled corpora", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Ramage, David Hall, Ramesh Nallapati, and Christopher D. Manning. 2009. Labeled LDA: A supervised topic model for credit attribution in multi-labeled corpora. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Prediction focused topic models via feature selection", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russell", |
|
"middle": [], |
|
"last": "Kunes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Finale", |
|
"middle": [], |
|
"last": "Doshi-Velez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "AISTATS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Ren, Russell Kunes, and Finale Doshi-Velez. 2020. Prediction focused topic models via feature selection. In AISTATS.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Expert, crowdsourced, and machine assessment of suicide risk via online postings", |
|
"authors": [ |
|
{ |
|
"first": "Han-Chin", |
|
"middle": [], |
|
"last": "Shing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suraj", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayah", |
|
"middle": [], |
|
"last": "Zirikly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meir", |
|
"middle": [], |
|
"last": "Friedenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "III" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Fifth Workshop on Computational Linguistics and Clinical Psychology: From Keyboard to Clinic", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han-Chin Shing, Suraj Nair, Ayah Zirikly, Meir Friedenberg, Hal Daum\u00e9 III, and Philip Resnik. 2018. Expert, crowdsourced, and machine assessment of suicide risk via online postings. In Proceedings of the Fifth Workshop on Computational Linguistics and Clinical Psychology: From Keyboard to Clinic, pages 25-36.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Bioinfo@uavr at erisk 2020: on the use of psycholinguistics features and machine learning for the classification and quantification of mental diseases", |
|
"authors": [ |
|
{ |
|
"first": "Alina", |
|
"middle": [], |
|
"last": "Trifan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Salgado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9 Lu\u00eds", |
|
"middle": [], |
|
"last": "Oliveira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Working Notes of CLEF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alina Trifan, Pedro Salgado, and Jos\u00e9 Lu\u00eds Oliveira. 2020. Bioinfo@uavr at erisk 2020: on the use of psycholinguistics features and machine learning for the classification and quantification of mental diseases. In Working Notes of CLEF.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Lda-based document models for ad-hoc retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W. Bruce", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "178--185", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Wei and W. Bruce Croft. 2006. Lda-based document models for ad-hoc retrieval. In SIGIR, page 178-185.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "CLPsych 2019 shared task: Predicting the degree of suicide risk in Reddit posts", |
|
"authors": [ |
|
{ |
|
"first": "Ayah", |
|
"middle": [], |
|
"last": "Zirikly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00d6zlem", |
|
"middle": [], |
|
"last": "Uzuner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristy", |
|
"middle": [], |
|
"last": "Hollingshead", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ayah Zirikly, Philip Resnik, \u00d6zlem Uzuner, and Kristy Hollingshead. 2019. CLPsych 2019 shared task: Predicting the degree of suicide risk in Reddit posts. In Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Plate diagram illustrating the graphical model for SHT M . D is the number of tweets.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Schematic diagram of our model", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "228\u00b10.108 0.259\u00b10.135 0.285\u00b10.159 0.729\u00b10.115 0.335\u00b10.169 Best Validation: w=3, n=3 0.706\u00b10.181 0.749\u00b10.196 0.783\u00b10.214 0.270\u00b10.115 0.800\u00b10.192", |
|
"content": "<table><tr><td>Setting/Model</td><td>F1</td><td>F2</td><td>TP</td><td>FP</td><td>AUC</td></tr><tr><td>ODH-30</td><td/><td colspan=\"2\">Averaged Validation Performance</td><td/><td/></tr><tr><td>Competition Baseline</td><td colspan=\"3\">0.Test Performance</td><td/><td/></tr><tr><td>Competition Baseline</td><td>0.636</td><td>0.636</td><td>0.636</td><td>0.364</td><td>0.661</td></tr><tr><td>Our Top-2 submitted runs: w=3, n=3</td><td>0.615</td><td>0.714</td><td>0.8</td><td>0.727</td><td>0.664</td></tr><tr><td>w=5, n=2</td><td>0.583</td><td>0.648</td><td>0.7</td><td>0.636</td><td>0.645</td></tr><tr><td>ODH-182</td><td/><td colspan=\"2\">Averaged Validation Performance</td><td/><td/></tr><tr><td>Competition Baseline</td><td colspan=\"5\">0.547\u00b10.034 0.597\u00b10.049 0.643\u00b10.105 0.483\u00b10.178 0.654\u00b10.033</td></tr><tr><td>Best Validation, w=10, n=7</td><td colspan=\"5\">0.623\u00b10.044 0.783\u00b10.012 0.950\u00b10.042 0.780\u00b10.088 0.587\u00b10.076</td></tr><tr><td/><td/><td colspan=\"2\">Test Performance</td><td/><td/></tr><tr><td>Competition Baseline</td><td>0.71</td><td>0.724</td><td>0.733</td><td>0.333</td><td>0.764</td></tr><tr><td>Our Top-2 submitted runs: w=10, n=7</td><td>0.684</td><td>0.812</td><td>0.929</td><td>0.786</td><td>0.663</td></tr><tr><td>w=10, n=7 *</td><td>0.703</td><td>0.823</td><td>0.929</td><td>0.714</td><td>0.648</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |