|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:58:26.137130Z" |
|
}, |
|
"title": "Enhancing Transformer with Sememe Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Yuhui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stanford University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chenghao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Columbia University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhengping", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stanford University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tsinghua University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While large-scale pretraining has achieved great success in many NLP tasks, it has not been fully studied whether external linguistic knowledge can improve data-driven models. In this work, we introduce sememe knowledge into Transformer and propose three sememeenhanced Transformer models. Sememes, by linguistic definition, are the minimum semantic units of language, which can well represent implicit semantic meanings behind words. Our experiments demonstrate that introducing sememe knowledge into Transformer can consistently improve language modeling and downstream tasks. The adversarial test further demonstrates that sememe knowledge can substantially improve model robustness. 1 * Indicates equal contribution. Work done at Tsinghua University. Y.Z. and C.Y. designed and evaluated the model architecture and performed the adversarial test. Z.Z. performed the data ablation study and case study. Z.L. supervised the work and is the corresponding author.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While large-scale pretraining has achieved great success in many NLP tasks, it has not been fully studied whether external linguistic knowledge can improve data-driven models. In this work, we introduce sememe knowledge into Transformer and propose three sememeenhanced Transformer models. Sememes, by linguistic definition, are the minimum semantic units of language, which can well represent implicit semantic meanings behind words. Our experiments demonstrate that introducing sememe knowledge into Transformer can consistently improve language modeling and downstream tasks. The adversarial test further demonstrates that sememe knowledge can substantially improve model robustness. 1 * Indicates equal contribution. Work done at Tsinghua University. Y.Z. and C.Y. designed and evaluated the model architecture and performed the adversarial test. Z.Z. performed the data ablation study and case study. Z.L. supervised the work and is the corresponding author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Self-supervised pretraining has significantly improved the performance of Transformer (Vaswani et al., 2017 ) on a wide range of NLP tasks (Radford et al., 2018; Devlin et al., 2019; . While no explicit linguistic rules and concepts are introduced, models can achieve remarkable performances with extensive training signals provided by large-scale data. Nonetheless, recent works still demonstrate that external syntactic information can improve various NLP tasks, including machine translation (Sennrich and Haddow, 2016; Aharoni and Goldberg, 2017; Bastings et al., 2017) and semantic role labeling Strubell et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 161, |
|
"text": "(Radford et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 182, |
|
"text": "Devlin et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 522, |
|
"text": "(Sennrich and Haddow, 2016;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 550, |
|
"text": "Aharoni and Goldberg, 2017;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 551, |
|
"end": 573, |
|
"text": "Bastings et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 623, |
|
"text": "Strubell et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Can external semantic information benefit the widely-adopted pretraining and fine-tuning framework as well? In response, we explore incorporating sememe knowledge into Transformer (Vaswani et al., 2017) . Sememes are the minimum semantic units of meaning for natural language, as some linguists assume that a limited closed set of sememes can be composed to represent the semantic meaning of each word (Bloomfield, 1926) . In this work, we adopt a highquality sememe-based lexical knowledge base, HowNet (Dong and Dong, 2006; Qi et al., 2019) , which can provide powerful support for models to understand Chinese word semantics (Gu et al., 2018; Niu et al., 2017) . Some examples of sememe annotations can be found in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 202, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 420, |
|
"text": "(Bloomfield, 1926)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 525, |
|
"text": "(Dong and Dong, 2006;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 542, |
|
"text": "Qi et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 628, |
|
"end": 645, |
|
"text": "(Gu et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 663, |
|
"text": "Niu et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 718, |
|
"end": 726, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose to combine two simple methods to incorporate sememe knowledge into our framework: 1) based on the linguistic assumption, we add aggregated sememe embeddings to each word embedding to enhance its semantic representation; 2) we use sememe prediction as an auxiliary task to help the model gain deeper understandings of word semantics. We verify the effectiveness of our methods on several Chinese NLP tasks that are closely related to word-level and sentence-level semantics. Following general settings of pretraining and fine-tuning, our experiments show consistent improvements on all the tasks with sememeenhanced Transformer. We also find that the sememe-enhanced model can achieve the same performance with less fine-tuning data, which is desirable as data annotation processes are always time-consuming and expensive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We further demonstrate that, by incorporating sememe knowledge using our methods, model robustness can be significantly improved towards adversarial examples, which are generated by replacing nouns, adjectives and adverbs with their synonyms in our experiment. Our case studies further interpret why sememe knowledge can help model defend adversarial attacks. Figure 1 : Our proposed model architecture. For each word, we enhance word representation by adding aggregated sememe embeddings. We use multitask learning with three tasks: sememe prediction (predicting sememes of next word), language modeling (predicting next word) and supervised learning (only for downstream tasks).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 368, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we propose two simple methods to incorporate sememe knowledge into our framework: aggregated sememe embeddings and sememe prediction auxiliary task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Transformer was originally proposed by Vaswani et al. (2017) as a machine translation architecture. We use a multi-layer Transformer architecture similar to the setup in Radford et al. (2018) , which has been verified effectiveness on multiple NLP tasks. At the input layer, a sequence of words (w 1 , w 2 , ..., w T ) are embedded as H 0 = (w 1 , w 2 , ..., w T ) \u2208 R T \u00d7D , where D indicates the hidden size of the model. A positional embedding is then added to inject position information into Transformer. After L residual multi-head self-attention layers with feed-forward connections, we obtain the contextualized sequence embedding", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 60, |
|
"text": "Vaswani et al. (2017)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 191, |
|
"text": "Radford et al. (2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "H L = (h L 1 , h L 2 , ..., h L T ) \u2208 R T \u00d7D .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer", |
|
"sec_num": "2.1" |
|
}, |
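
{

"text": "To make the notation above concrete, the following is a minimal PyTorch-style sketch (illustrative only, not our exact implementation) of how the input representation H^0 is built from word embeddings plus positional embeddings before the L self-attention layers; names such as vocab_size, max_len, and d_model are placeholders.\n\nimport torch\nimport torch.nn as nn\n\nclass TransformerInput(nn.Module):\n    # Builds H^0: word embeddings plus positional embeddings, shape (batch, T, D).\n    def __init__(self, vocab_size, max_len, d_model):\n        super().__init__()\n        self.word_emb = nn.Embedding(vocab_size, d_model)\n        self.pos_emb = nn.Embedding(max_len, d_model)\n\n    def forward(self, token_ids):  # token_ids: (batch, T) word indices\n        positions = torch.arange(token_ids.size(1), device=token_ids.device)\n        return self.word_emb(token_ids) + self.pos_emb(positions)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Transformer",

"sec_num": "2.1"

},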
|
{ |
|
"text": "Enhancing word representation is a common approach to introduce linguistic knowledge into neural networks (Sennrich and Haddow, 2016; Niu et al., 2017; Bojanowski et al., 2017) . For each word w, Transformer-SE considers all of its sememes and enhances word representation by adding its average sememe embeddings to word embedding. Formally, we have:", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 133, |
|
"text": "(Sennrich and Haddow, 2016;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 151, |
|
"text": "Niu et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 176, |
|
"text": "Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aggregated Sememe Embeddings", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "w = 1 n w s\u2208S(w) x s + w", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aggregated Sememe Embeddings", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where S(w) refers to the sememe set associated with word w with the size n w , x s refers to the embedding of the sememe s, w refers to the embedding of word w andw refers to the sememeenhanced word embedding. Sememe-enhanced representationw is directly fed into Transformer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aggregated Sememe Embeddings", |
|
"sec_num": "2.2" |
|
}, |
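
{

"text": "As a concrete illustration of the equation above, the following is a minimal PyTorch-style sketch (illustrative only, not our exact implementation) of the aggregated sememe embedding w\u0303 = (1/n_w) \u2211_{s \u2208 S(w)} x_s + w; the mapping word2sememes stands in for the HowNet annotation, and the per-token loop favors clarity over efficiency.\n\nimport torch\nimport torch.nn as nn\n\nclass SememeEnhancedEmbedding(nn.Module):\n    def __init__(self, vocab_size, num_sememes, d_model, word2sememes):\n        super().__init__()\n        self.word_emb = nn.Embedding(vocab_size, d_model)\n        self.sememe_emb = nn.Embedding(num_sememes, d_model)\n        self.word2sememes = word2sememes  # dict: word id -> list of sememe ids\n\n    def forward(self, token_ids):  # token_ids: (batch, T) word indices\n        word_vecs = self.word_emb(token_ids)\n        device = token_ids.device\n        sememe_means = []\n        for w in token_ids.view(-1).tolist():\n            ids = self.word2sememes.get(w, [])\n            if ids:  # average of the word's sememe embeddings\n                vec = self.sememe_emb(torch.tensor(ids, device=device)).mean(dim=0)\n            else:    # words without sememe annotation keep their plain embedding\n                vec = torch.zeros(word_vecs.size(-1), device=device)\n            sememe_means.append(vec)\n        sememe_part = torch.stack(sememe_means).view_as(word_vecs)\n        return word_vecs + sememe_part  # w-tilde, fed into the Transformer layers",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Aggregated Sememe Embeddings",

"sec_num": "2.2"

},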
|
{ |
|
"text": "The Transformer-SE model complies with the linguistic assumption that implicit word semantics can be composed of a limited set of sememes. Also, as sememe embeddings are shared among words, latent semantic correlations between words can be well encoded. While our method to incorporate sememe knowledge is rather straightforward, our main purpose is to verify the effectiveness of sememe knowledge. We leave more potential methods to enrich word-level semantics with sememe knowledge such as tree LSTM (Tai et al., 2015) and graph convolutional network (Bastings et al., 2017) in future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 502, |
|
"end": 520, |
|
"text": "(Tai et al., 2015)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 576, |
|
"text": "(Bastings et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aggregated Sememe Embeddings", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Sememe prediction task aims to predict sememes for the next word and can be formulated as a multilabel classification task. Inspired by the multitask learning (Caruana, 1997; Collobert et al., 2011) , we add the sememe prediction task in addition to the language modeling task for Transformer-SP. This task challenges the model's capability to incorporate sememe knowledge, and can be viewed as a complementary task for language modeling, as predicting the sememes of the next word is closely related to understanding semantics and it is often more learnable than directly modeling the probability of the next word. 2 At each time step, given current contextualized representation h L from Transformer, we estimate the probability of sememe s associated with next word w as p(w, s) = \u03c3(wh L + b), where w and b are the weight and bias associated with sememe s, \u03c3 is the sigmoid activation function. We then calculate the binary cross-entropy loss of sememe prediction L SP as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 174, |
|
"text": "(Caruana, 1997;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 198, |
|
"text": "Collobert et al., 2011)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 617, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sememe Prediction Auxiliary Task", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "L SP = \u2212 1 T T t=1 1 n s\u2208S g(w t , s) log(p(w t , s)) +(1 \u2212 g(w t , s)) log(1 \u2212 p(w t , s))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sememe Prediction Auxiliary Task", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where S refers to the overall sememe set with the size n, g(w, s) is a binary variable indicating whether sememe s is associated with word w. Finally, we formulate the loss as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sememe Prediction Auxiliary Task", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "L P RE = L LM + L SP L = L SL + \u03c1L P RE", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sememe Prediction Auxiliary Task", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where L LM and L SL are the conventional negative log-likelihood language modeling loss and downstream supervised learning loss. L P RE is the loss optimized during pretraining, while L is the loss optimized during supervised training for downstream tasks, \u03c1 serves as a coefficient to control the strength of L P RE during supervised learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sememe Prediction Auxiliary Task", |
|
"sec_num": "2.3" |
|
}, |
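
{

"text": "A minimal sketch (illustrative only, assuming a single linear layer over the contextual states as the per-sememe weight and bias described above) of the sememe prediction loss L_{SP} and the combined objectives L_{PRE} and L:\n\nimport torch.nn.functional as F\n\ndef sememe_prediction_loss(h, sememe_classifier, targets):\n    # h: (batch, T, D) contextual states h^L from the Transformer\n    # sememe_classifier: nn.Linear(D, num_sememes); output s uses the weight and bias of sememe s\n    # targets: (batch, T, num_sememes) binary indicators g(w_t, s) for the next word\n    logits = sememe_classifier(h)\n    # binary cross-entropy averaged over time steps and the sememe inventory\n    return F.binary_cross_entropy_with_logits(logits, targets.float())\n\ndef total_loss(lm_loss, sp_loss, sl_loss=None, rho=0.5):\n    # pretraining:  L_PRE = L_LM + L_SP\n    # fine-tuning:   L = L_SL + rho * L_PRE\n    pre = lm_loss + sp_loss\n    return pre if sl_loss is None else sl_loss + rho * pre",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sememe Prediction Auxiliary Task",

"sec_num": "2.3"

},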
|
{ |
|
"text": "Transformer-SE and Transformer-SP are designed based on different ideas. Transformer-SE can well inform sememe knowledge to all self-attention layers, while Transformer-SP utilizes additional training signals through the back-propagation process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To combine the advantages of these models, we propose a hybrid model named Transformer-SEP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Transformer-SEP incorporates sememe knowledge into the input layer by adding aggregated sememe embeddings and performs the sememe prediction auxiliary task in the output layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We experiment across a diverse set of five benchmark NLP tasks and demonstrate the effectiveness of introducing sememe knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use 6-layer 8-head Transformer with the hidden size of 768 and feedforward size of 2048. We set both word embedding and sememe embedding size as 768. We use batch size of 32 and set dropout rate as 0.2 to alleviate overfitting. The vocabulary size is 39,770 and the total number of sememes is 2,100. We truncate the sequence length to 128 for pretraining and supervised learning. When performing supervised training, we set the coefficient \u03c1 to be 0.5. Embeddings are tied for the input layer and output layer to speed up convergence. We clip gradients less than 2 and use Adam optimizer (Kingma and Ba, 2014) with 0.001 learning rate and 8000 warmup steps. For downstream tasks, we use the best pretrained model from language modeling to initialize.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3.1" |
|
}, |
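
{

"text": "For reference, the hyperparameters listed above can be collected into a single configuration sketch; the values are taken from the text, while the field names are our own.\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass SememeTransformerConfig:\n    num_layers: int = 6          # Transformer layers\n    num_heads: int = 8           # attention heads\n    d_model: int = 768           # hidden size = word = sememe embedding size\n    d_ff: int = 2048             # feed-forward size\n    dropout: float = 0.2\n    vocab_size: int = 39770\n    num_sememes: int = 2100\n    max_seq_len: int = 128       # truncation length for pretraining and supervised learning\n    batch_size: int = 32\n    rho: float = 0.5             # weight of L_PRE during supervised training\n    grad_clip: float = 2.0       # gradient clipping threshold\n    learning_rate: float = 1e-3  # Adam (Kingma and Ba, 2014)\n    warmup_steps: int = 8000\n    tie_embeddings: bool = True  # input and output embeddings are shared",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "3.1"

},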
|
{ |
|
"text": "Language Modeling Language modeling on a large corpus provides additional training signals for supervised downstream tasks. We use perplexity (PPL) to measure the performance of the language model. Lower PPL indicates better performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We pretrain the language model on the People's Daily corpus, which contains \u223c 15M words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Headline Categorization Automatic and accurate news categorization is essential for recommen- dation systems. We use NLPCC 2017 news headline categorization dataset (Qiu et al., 2017) , which contains 156,000 news for training and 36,000 news for validation, divided into 18 categories including finance, society, game, etc. We use accuracy (ACC) to measure the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 183, |
|
"text": "(Qiu et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Sentiment Classification Sentiment classification is a useful task for emoticon recommendation, depression detection, etc. We use NLPCC 2013 Weibo sentiment detection dataset and conduct experiments on sentence-level sentiment classification. The dataset includes 7 different sentiment genres. We remove sentences without any sentiment and resplit the data to 8,225 / 997 / 1,020 for training, validation, test, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Semantic Matching Semantic matching is fundamental for question answering, which aims to match the input question to similar questions in an existing database. We use LCQMC dataset for this task, which contains 238,766 / 8,802 / 12,500 training, validation, test data, respectively. For each pair of questions, we concatenate them with a special token for classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Sememe Prediction Predicting sememes for given words by its definitions is important for the HowNet extension . The definitions are extracted from the Contemporary Chinese Dictionary and the sememes of target words are masked for fair comparison. We create a dataset containing 41,081 / 5,135 / 5,136 word-definition pairs for training, validation and test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tasks and Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "From Table 1 , we observe that simply adding sememe embedding (i.e., Transformer-SE) can lead to significant improvements over all tasks. These tasks challenge models on the capability of modeling word-level semantics and sentence-level semantics, which demonstrates that sememe knowledge can provide beneficial semantic information for Transformer. The improvement of Transformer-SP is rather less, which may due to the difficulty of predicting new knowledge without previous knowledge. Transformer-SEP achieves further improvements over Transformer-SE. The additional improvement can be interpreted as combining the advantages of these two models. As characters provide strong semantics for Chinese (Chen et al., 2015) , we also compare sememe decomposition with character decomposition (Se-meme2Char) for our best model (i.e., with aggregated character embedding and character prediction auxiliary task). From Table 1 , we observe clear performance drops over all tasks, which demonstrates that decomposing word into sememes are much more effective.", |
|
"cite_spans": [ |
|
{ |
|
"start": 701, |
|
"end": 720, |
|
"text": "(Chen et al., 2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 913, |
|
"end": 920, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Performance", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We further perform data ablation study and observe overall consistent improvements for downstream tasks over different amounts of training data, indicating that incorporating external sememe knowledge could benefit model robustness when faced with limited training data ( Figure 2 ). It is also worth noting that, when training data is limited, the more a task depends on word-level semantics (e.g., headline categorization > sentiment classification > semantic matching 3 ), the larger improvement can be achieved by incorporating sememe knowledge. We hypothesize this is due to the increased unseen words in the test set when faced Transformer-SEP) . We generate adversarial examples by replacing nouns, adjectives, and adverbs for cases that both models can predict correctly. We report error rate (lower the better) categorized by part-of-speech and the number of generated adversarial examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 634, |
|
"end": 650, |
|
"text": "Transformer-SEP)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 280, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Ablation Study", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "with less training data. As semantically similar words would share similar sememes, the sememeinformed model would better understand semantics and outperform the baseline by a large margin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Ablation Study", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Recent research has demonstrated that neural networks are vulnerable to adversarial examples (Goodfellow et al., 2015; Jia and Liang, 2017; Alzantot et al., 2018) . To evaluate the robustness of our models, we generate adversarial examples by replacing similar nouns, adjectives and adverbs for the cases that both Transformer and Transformer-SEP can predict correctly. Intuitively, these words are generally more informative for prediction and models are more likely to overfit such words. Specifically, we compute the word similarity based on the novel Cilin metric (Tian and Zhao, 2010) and we use THULAC (Sun et al., 2016) for part-of-speech (POS) tagging. For the semantic matching task, we only replace words that occur in both sentences to ensure semantic consistency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 118, |
|
"text": "(Goodfellow et al., 2015;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 139, |
|
"text": "Jia and Liang, 2017;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "Alzantot et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 626, |
|
"text": "(Sun et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
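
{

"text": "The generation procedure described above can be summarized in the following sketch (illustrative only); pos_tag and most_similar_word are placeholders standing in for THULAC part-of-speech tagging and the Cilin-based similarity lookup, which we do not reproduce here.\n\ndef generate_adversarial(words, pos_tag, most_similar_word, target_pos=('n', 'a', 'd')):\n    # words: the sentence as a list of words\n    # pos_tag: callable mapping a word list to a POS tag list (e.g., via THULAC)\n    # most_similar_word: callable returning the closest word under the Cilin metric\n    tags = pos_tag(words)\n    examples = []\n    for i, (word, tag) in enumerate(zip(words, tags)):\n        if tag in target_pos:  # nouns, adjectives and adverbs\n            substitute = most_similar_word(word)\n            if substitute and substitute != word:\n                adversarial = list(words)\n                adversarial[i] = substitute  # one replacement per generated example\n                examples.append(adversarial)\n    return examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Adversarial Test and Case Study",

"sec_num": "3.5"

},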
|
{ |
|
"text": "\u5978 \u5978 \u5978\u5546 \u5546 \u5546( ( (\u9a97 \u9a97 \u9a97\u5b50 \u5b50 \u5b50) ) )\u5982\u4f55\u6709\u5de5\u4f5c\u724c\u5728\u884c\u674e\u5927\u5385\u91cc\u660e\u76ee\u5f20 \u80c6\u884c\u9a97\uff1f", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "How do the profiteers (cheaters) have staff cards and blatantly cheat in the baggage hall? Table 3 : Case study for the adversarial test. The original word with its sememes is colored in blue, while the replaced word with its sememes is colored in red.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 98, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "\u6709 \u6709 \u6709\u7f6a \u7f6a \u7f6a guilty \u4eba \u4eba \u4eba human \u6b3a \u6b3a \u6b3a\u9a97 \u9a97 \u9a97 deceive \u5546 \u5546 \u5546\u4e1a \u4e1a \u4e1a commerce \u6709 \u6709 \u6709\u7f6a \u7f6a \u7f6a guilty \u4eba \u4eba \u4eba human \u9a97 \u9a97 \u9a97 cheat", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "We report the adversarial test error rate categorized by POS in Table 2 . Sememe-enhanced Transformer-SEP achieves consistent improvement over the vanilla Transformer. An interesting find-ing is that, in headline categorization and semantic matching, the largest performance drops are observed by replacing nouns while intuitively sentiment classification should be more sensitive to adjectives.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 71, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "We further perform the case study to get a better interpretation of why sememe knowledge can improve model robustness to adversarial attacks. We show an example that Transformer-SEP can predict correctly but get wrong for Transformer in Table 3 . As word \"cheater\" and \"profiteer\" share the same sememes \"guilty\" and \"human\" and similar sememes \"deceive\" and \"cheat\", this sememe knowledge can propagate through all self-attention layers, thus it is easy to interpret why sememe knowledge can enhance word representation and defend such word-replacement attack. More examples can be found in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 244, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adversarial Test and Case Study", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "In this work, we introduce sememe knowledge into Transformer and verify the effectiveness of external semantic knowledge for data-driven models. We further demonstrate the robustness of our methods via data ablation study and adversarial test. For future work, we would like to explore more ways to leverage semantic knowledge and generate different adversarial examples for evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For example, if a sentence starts with \"How to cook\", it is much easier to predict the next word is a kind of \"food\" than any specified word. It is worth noting that language modeling has about 20 times larger vocabulary size.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For instance, the word football strongly indicates sport for headline categorization, while what's football? = is it a football? for semantic matching.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors would like to thank the anonymous reviewers for their many insightful comments. This work is (jointly or partly) funded by the Natural Science Foundation of China (NSFC) and the German Research Foundation (DFG) in Project Crossmodal Learning, NSFC 61621136008 / DFG TRR-169.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Input Ours Base Sentiment \u5978 \u5978 \u5978\u5546 \u5546 \u5546( ( (\u9a97 \u9a97 \u9a97\u5b50 \u5b50 \u5b50) ) ) \u5982\u4f55\u6709\u5de5\u4f5c\u724c\u5728\u884c\u674e\u5927\u5385\u91cc\u660e\u76ee\u5f20\u80c6\u884c\u9a97\uff1f disgust surprise Classification How do the profiteers (cheaters) have staff cards and blatantly cheat in the baggage hall?Chinese medicine is less safe than Western medicine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u8f6c\u8f7d\u4e00\u4e2a\u6210 \u6210 \u6210\u65b9 \u65b9 \u65b9 ( ( (\u79d8 \u79d8 \u79d8\u65b9 \u65b9 \u65b9) ) ) \uff0c\u4e3b\u6cbb\u4e00\u5207\u9aa8\u6298\uff0c\u636e\u8bf4\u4e00\u5242\u89c1\u6548 regimen essay Categorization We republish a set prescription (secret prescription) , which mainly treats all kinds of fractures, and is said to be effective with only one dose.history story He was a good general (valiant general) that attacked Goguryeo for three times, yet was killed by a group of rogues. A. \u521d \u521d \u521d\u4e2d \u4e2d \u4e2d\u751f \u751f \u751f( ( (\u7537 \u7537 \u7537\u751f \u751f \u751f) ) ) \u6697\u604b\u5973\u751f\u4f1a\u6709\u4ec0\u4e48\u8868\u73b0\uff1f A. What performance will junior high school students (boy students) have if they secretly love a girl?What is the performance of junior high school students (boy students) if they secretly love a girl?Case Study for adversarial test. The original words are shown in parenthesis and colored in blue, while the replaced words (similar words calculated by Cilin (Tian and Zhao, 2010)) are colored in red. Both the base model and our model (i.e. Transformer v.s. Transformer-SEP) predict correctly on sentences with the original words, yet only ours succeed in the sentences with the replaced words. We show sememes for original words and sememes for replaced words in blue and red color boxes respectively. Best viewed in color.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Headline", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Towards string-to-tree neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Roee", |
|
"middle": [], |
|
"last": "Aharoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "132--140", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roee Aharoni and Yoav Goldberg. 2017. Towards string-to-tree neural machine translation. In Pro- ceedings of the 55th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 132-140.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Generating natural language adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Moustafa", |
|
"middle": [], |
|
"last": "Alzantot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yash", |
|
"middle": [ |
|
"Sharma" |
|
], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Jhang", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mani", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moustafa Alzantot, Yash Sharma Sharma, Ahmed El- gohary, Bo-Jhang Ho, Mani Srivastava, and Kai-Wei Chang. 2018. Generating natural language adver- sarial examples. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Graph convolutional encoders for syntax-aware neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Joost", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilker", |
|
"middle": [], |
|
"last": "Aziz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Marcheggiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1957--1967", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joost Bastings, Ivan Titov, Wilker Aziz, Diego Marcheggiani, and Khalil Sima'an. 2017. Graph convolutional encoders for syntax-aware neural ma- chine translation. In Proceedings of the 2017 Con- ference on Empirical Methods in Natural Language Processing, pages 1957-1967.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A set of postulates for the science of language", |
|
"authors": [ |
|
{ |
|
"first": "Leonard", |
|
"middle": [], |
|
"last": "Bloomfield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1926, |
|
"venue": "Language", |
|
"volume": "2", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonard Bloomfield. 1926. A set of postulates for the science of language. Language, 2(3).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Multitask learning. Machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "41--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rich Caruana. 1997. Multitask learning. Machine learning, 28(1):41-75.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Joint learning of character and word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Xinxiong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Twenty-Fourth International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinxiong Chen, Lei Xu, Zhiyuan Liu, Maosong Sun, and Huanbo Luan. 2015. Joint learning of charac- ter and word embeddings. In Twenty-Fourth Inter- national Joint Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2461--2505", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12:2461- 2505.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Hownet and the computation of meaning (with Cd-rom)", |
|
"authors": [ |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhendong Dong and Qiang Dong. 2006. Hownet and the computation of meaning (with Cd-rom). World Scientific.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Explaining and harnessing adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathon", |
|
"middle": [], |
|
"last": "Shlens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and harnessing adversar- ial examples. In International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Language modeling with sparse product of sememe experts", |
|
"authors": [ |
|
{ |
|
"first": "Yihong", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruobing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fen", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leyu", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4642--4651", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yihong Gu, Jun Yan, Hao Zhu, Zhiyuan Liu, Ruobing Xie, Maosong Sun, Fen Lin, and Leyu Lin. 2018. Language modeling with sparse product of sememe experts. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4642-4651.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adversarial examples for evaluating reading comprehension systems", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2031", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2017. Adversarial exam- ples for evaluating reading comprehension systems. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2021-2031.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Lcqmc: A large-scale chinese question matching corpus", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingcai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chong", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huajun", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongfang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Buzhou", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1952--1962", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Liu, Qingcai Chen, Chong Deng, Huajun Zeng, Jing Chen, Dongfang Li, and Buzhou Tang. 2018. Lcqmc: A large-scale chinese question matching corpus. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1952-1962.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Encoding sentences with graph convolutional networks for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Marcheggiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1506--1515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Marcheggiani and Ivan Titov. 2017. Encoding sentences with graph convolutional networks for se- mantic role labeling. In Proceedings of the 2017 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1506-1515.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improved word representation learning with sememes", |
|
"authors": [ |
|
{ |
|
"first": "Yilin", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruobing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2049--2058", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yilin Niu, Ruobing Xie, Zhiyuan Liu, and Maosong Sun. 2017. Improved word representation learning with sememes. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), volume 1, pages 2049-2058.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Openhownet: An open sememe-based lexical knowledge base", |
|
"authors": [ |
|
{ |
|
"first": "Fanchao", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenghao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.09957" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fanchao Qi, Chenghao Yang, Zhiyuan Liu, Qiang Dong, Maosong Sun, and Zhendong Dong. 2019. Openhownet: An open sememe-based lexical knowl- edge base. arXiv preprint arXiv:1901.09957.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Overview of the nlpcc 2017 shared task: Chinese news headline categorization", |
|
"authors": [ |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "National CCF Conference on Natural Language Processing and Chinese Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "948--953", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xipeng Qiu, Jingjing Gong, and Xuanjing Huang. 2017. Overview of the nlpcc 2017 shared task: Chinese news headline categorization. In National CCF Con- ference on Natural Language Processing and Chi- nese Computing, pages 948-953. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Salimans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training. URL https://s3- us-west-2. amazonaws. com/openai-assets/research- covers/languageunsupervised/language understand- ing paper. pdf.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Linguistic input features improve neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "83--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich and Barry Haddow. 2016. Linguistic input features improve neural machine translation. In Proceedings of the First Conference on Machine Translation: Volume 1, Research Papers, pages 83- 91.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Linguistically-informed self-attention for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Verga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Andor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5027--5038", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Patrick Verga, Daniel Andor, David Weiss, and Andrew McCallum. 2018. Linguistically-informed self-attention for semantic role labeling. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 5027-5038.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Thulac: An efficient lexical analyzer for chinese", |
|
"authors": [ |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinxiong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaixu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhipeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maosong Sun, Xinxiong Chen, Kaixu Zhang, Zhipeng Guo, and Zhiyuan Liu. 2016. Thulac: An efficient lexical analyzer for chinese. Technical report, Tech- nical Report. Technical Report.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improved semantic representations from tree-structured long short-term memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Kai Sheng", |
|
"middle": [], |
|
"last": "Tai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1556--1566", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D Manning. 2015. Improved semantic representations from tree-structured long short-term memory net- works. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1556-1566.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Words similarity algorithm based on tongyici cilin in semantic web adaptive learning system", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Jiu-Le Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Jilin University", |
|
"volume": "28", |
|
"issue": "6", |
|
"pages": "602--608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiu-le Tian and Wei Zhao. 2010. Words similarity al- gorithm based on tongyici cilin in semantic web adaptive learning system. Journal of Jilin Univer- sity(Information Science Edition), 28(6):602-608.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Lexical sememe prediction via word embeddings and matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "Ruobing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingchi", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4200--4206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruobing Xie, Xingchi Yuan, Zhiyuan Liu, and Maosong Sun. 2017. Lexical sememe prediction via word embeddings and matrix factorization. In IJCAI, pages 4200-4206.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Russ", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5754--5764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Performance of Transformer and Transformer-SEP with different amounts of training data. More significant improvements can be achieved on tasks that depend more on word-level semantics. X-axis: Percent of supervised training data. Y-axis: Accuracy. The error bars indicate the 95% confidence interval.", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "Experimental results on different tasks. Transformer, Transformer-SE, Transformer-SP and Transformer-SEP refers to the vanilla Transformer model (base), Transformer with aggregated sememe embeddings, Transformer with sememe prediction auxiliary task and the hybrid model, respectively. We also compare sememe decomposition to character decomposition for our best model and demonstrate advantages of our methods.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>Task</td><td>Language Modeling</td><td>Headline Categorization</td><td>Sentiment Classification</td><td>Semantic Matching</td><td>Sememe Prediction</td></tr><tr><td>Metric</td><td>PPL</td><td>ACC (%)</td><td>ACC (%)</td><td>ACC (%)</td><td>MAP (%)</td></tr><tr><td>Transformer</td><td>49.01</td><td>71.5</td><td>52.7</td><td>81.2</td><td>40.1</td></tr><tr><td>Transformer-SE</td><td>47.37</td><td>72.6</td><td>53.7</td><td>82.6</td><td>52.1</td></tr><tr><td>Transformer-SP</td><td>49.14</td><td>72.3</td><td>53.0</td><td>81.8</td><td>40.3</td></tr><tr><td>Transformer-SEP</td><td>46.53</td><td>72.6</td><td>54.9</td><td>83.3</td><td>52.8</td></tr><tr><td>+ Sememe2Char</td><td>48.90</td><td>72.3</td><td>52.2</td><td>81.2</td><td>-</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"text": "Adversarial test for the base model and our best model (i.e., Transformer v.s.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |