|
{ |
|
"paper_id": "2019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:54:24.508926Z" |
|
}, |
|
"title": "Study on Pre-trained Word Vector Model Applied to Intent Detection of Customer Service Dialogue System", |
|
"authors": [ |
|
{ |
|
"first": "Guan-Yu", |
|
"middle": [], |
|
"last": "\u9673\u51a0\u5b87", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "\u4e2d\u83ef\u96fb\u4fe1\u7814\u7a76\u9662 \u5de8\u91cf\u8cc7\u6599\u7814\u7a76\u6240 Telecommunication Laboratories", |
|
"institution": "Chunghwa Telecom Co., Ltd", |
|
"location": { |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Min-Feng", |
|
"middle": [], |
|
"last": "Kuo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "\u4e2d\u83ef\u96fb\u4fe1\u7814\u7a76\u9662 \u5de8\u91cf\u8cc7\u6599\u7814\u7a76\u6240 Telecommunication Laboratories", |
|
"institution": "Chunghwa Telecom Co., Ltd", |
|
"location": { |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "\u694a\u5b97\u61b2", |
|
"middle": [], |
|
"last": "Tsung-Hsien", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "\u4e2d\u83ef\u96fb\u4fe1\u7814\u7a76\u9662 \u5de8\u91cf\u8cc7\u6599\u7814\u7a76\u6240 Telecommunication Laboratories", |
|
"institution": "Chunghwa Telecom Co., Ltd", |
|
"location": { |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chun-Hsun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "\u4e2d\u83ef\u96fb\u4fe1\u7814\u7a76\u9662 \u5de8\u91cf\u8cc7\u6599\u7814\u7a76\u6240 Telecommunication Laboratories", |
|
"institution": "Chunghwa Telecom Co., Ltd", |
|
"location": { |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "\u5ed6\u5b9c\u658c I-Bin", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "\u4e2d\u83ef\u96fb\u4fe1\u7814\u7a76\u9662 \u5de8\u91cf\u8cc7\u6599\u7814\u7a76\u6240 Telecommunication Laboratories", |
|
"institution": "Chunghwa Telecom Co., Ltd", |
|
"location": { |
|
"country": "Taiwan, R.O.C" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In recent years, the concept of dialogue business has arisen among major technology giants, and the way of human-computer interaction has changed from a graphical interface to a", |
|
"pdf_parse": { |
|
"paper_id": "2019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In recent years, the concept of dialogue business has arisen among major technology giants, and the way of human-computer interaction has changed from a graphical interface to a", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "dialogue interaction interface. Therefore, natural language has become a key factor in the human-computer interaction interface. However, teaching the machine to communicate with humans to accomplish a specific task can be quite challenging. One of the difficulties that needs to overcome is natural language understanding, including how to identify what questions users are asking and how to get information hidden between words. It is important to let the machine know the user's intentions and information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
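As a concrete illustration of the two requirements just described, a natural language understanding module must turn a raw utterance into an intent plus the information carried alongside it. The sketch below shows one hypothetical output structure; the intent and slot labels are illustrative assumptions, not the paper's actual label scheme.

```python
# Illustration only: the two NLU outputs described above, as a hypothetical
# intent + slots structure (labels are placeholders, not the paper's scheme).
utterance = "我想查詢這個月的帳單"  # "I want to check this month's bill"

nlu_result = {
    "intent": "query_bill",            # what the user is asking (hypothetical label)
    "slots": {"month": "this_month"},  # information hidden between the words
}
print(nlu_result)
```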
|
{ |
|
"text": "The dataset of this study is collected from the dialogue of customer service materials. User's intents are recognized by deep learning models. In order to process Chinese unknown words more effectively and reduce false recognition, this study compares different pre-training vector models and deep learning models to understand user's intents. Compared with the use of random word embedding, the correct rate of using BERT-WWM-Chinese (BWC) model is improved by nearly 10%. It shows that the semantic vector generated by BWC model can better represent the relationship between user's words. The recognition rate of user's intent raises because similar vectors can be generated from similar words. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
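As a rough sketch of the comparison described above, the snippet below derives an utterance vector from a publicly released BERT-WWM-Chinese checkpoint (assumed here to be hfl/chinese-bert-wwm on Hugging Face) and contrasts it with a random-embedding baseline feeding the same linear classifier. The pooling choice, classifier head, and intent count are illustrative assumptions, not the paper's reported setup.

```python
# Minimal sketch: BERT-WWM-Chinese utterance vectors vs. a random-embedding
# baseline for intent classification. Checkpoint name and classifier setup
# are assumptions, not the paper's exact configuration.
import torch
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-bert-wwm")
bert = BertModel.from_pretrained("hfl/chinese-bert-wwm")
bert.eval()

def bwc_sentence_vector(text: str) -> torch.Tensor:
    """Mean-pool the last hidden layer into one 768-d utterance vector."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
    with torch.no_grad():
        hidden = bert(**inputs).last_hidden_state  # (1, seq_len, 768)
    return hidden.mean(dim=1).squeeze(0)

# Random-embedding baseline: each token id maps to a fixed random vector.
rand_table = torch.nn.Embedding(tokenizer.vocab_size, 768)

def random_sentence_vector(text: str) -> torch.Tensor:
    ids = tokenizer(text, return_tensors="pt")["input_ids"]
    return rand_table(ids).mean(dim=1).squeeze(0)

# Hypothetical 10-intent linear head; in practice it would be trained on the
# labeled customer-service utterances.
classifier = torch.nn.Linear(768, 10)
logits = classifier(bwc_sentence_vector("我想查詢這個月的帳單"))
print(logits.argmax().item())
```

Because the BWC vectors are contextual, two utterances phrased with different but related words land near each other, which is the mechanism the paragraph above credits for the roughly 10% accuracy gain over random embeddings.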
|
{ |
|
"text": "https://dumps.wikimedia.org/zhwiki/ 2 https://taku910.github.io/crfpp/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://ltp-cloud.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://radimrehurek.com/gensim/models/word2vec.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
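The last footnote points at gensim's word2vec implementation. A minimal sketch of building such a pre-trained word-vector model from segmented customer-service text follows; the toy corpus and hyperparameters are placeholders (gensim >= 4.0 parameter names assumed), and word segmentation (e.g. via the CRF++ or LTP tools in the footnotes above) is assumed to have been done already.

```python
# Minimal sketch of training word2vec embeddings with gensim, the tool cited
# in the footnote above. Corpus and hyperparameters are illustrative only.
from gensim.models import Word2Vec

# Pre-segmented customer-service utterances (toy examples).
sentences = [
    ["我", "想", "查詢", "帳單"],
    ["請", "幫", "我", "查詢", "費用"],
    ["如何", "取消", "合約"],
]

model = Word2Vec(
    sentences,
    vector_size=300,  # embedding dimension ("size" in gensim 3.x)
    window=5,         # context window
    min_count=1,      # keep rare words in this tiny toy corpus
    sg=1,             # skip-gram
)

# Similar words should land near each other in the learned vector space.
print(model.wv.most_similar("查詢"))
```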
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Learning end-to-end goal-oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-L", |
|
"middle": [], |
|
"last": "Boureau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.07683" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Bordes, Y.-L. Boureau, and J. Weston, \"Learning end-to-end goal-oriented dialog,\" arXiv preprint arXiv:1605.07683, 2016.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Building End-To-End Dialogue Systems Using Generative Hierarchical Neural Network Models", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AAAI", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "3776--3784", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. V. Serban, A. Sordoni, Y. Bengio, A. C. Courville, and J. Pineau, \"Building End-To-End Dialogue Systems Using Generative Hierarchical Neural Network Models,\" in AAAI, 2016, vol. 16, pp. 3776-3784.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A network-based end-to-end trainable task-oriented dialogue system", |
|
"authors": [ |
|
{ |
|
"first": "T.-H", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.04562" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T.-H. Wen et al., \"A network-based end-to-end trainable task-oriented dialogue system,\" arXiv preprint arXiv:1604.04562, 2016.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "End-to-end task-completion neural dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y.-N", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1703.01008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Li, Y.-N. Chen, L. Li, J. Gao, and A. Celikyilmaz, \"End-to-end task-completion neural dialogue systems,\" arXiv preprint arXiv:1703.01008, 2017.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Dialogue Intent Classification with Long Short-Term Memory Networks", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Natural Language Processing and Chinese Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Meng and M. Huang, \"Dialogue Intent Classification with Long Short-Term Memory Networks,\" Cham, 2018: Springer International Publishing, in Natural Language Processing and Chinese Computing, pp. 42-50.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Dialogue act sequence labeling using hierarchical encoder with crf", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Dasgupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Kumar, A. Agarwal, R. Dasgupta, and S. Joshi, \"Dialogue act sequence labeling using hierarchical encoder with crf,\" in Thirty-Second AAAI Conference on Artificial Intelligence, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On the effects of using word2vec representations in neural networks for dialogue act recognition", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cerisara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kral", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lenc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computer Speech & Language", |
|
"volume": "47", |
|
"issue": "", |
|
"pages": "175--193", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Cerisara, P. Kral, and L. Lenc, \"On the effects of using word2vec representations in neural networks for dialogue act recognition,\" Computer Speech & Language, vol. 47, pp. 175-193, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Neural attention models for sequence classification: Analysis and application to key term extraction and dialogue act detection", |
|
"authors": [ |
|
{ |
|
"first": "S.-S", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H.-Y", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1604.00077" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S.-s. Shen and H.-y. Lee, \"Neural attention models for sequence classification: Analysis and application to key term extraction and dialogue act detection,\" arXiv preprint arXiv:1604.00077, 2016.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Mikolov, K. Chen, G. Corrado, and J. Dean, \"Efficient estimation of word representations in vector space,\" arXiv preprint arXiv:1301.3781, 2013.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M.-W", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova, \"Bert: Pre-training of deep bidirectional transformers for language understanding,\" arXiv preprint arXiv:1810.04805, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.05365" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. E. Peters et al., \"Deep contextualized word representations,\" arXiv preprint arXiv:1802.05365, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Salimans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Radford, K. Narasimhan, T. Salimans, and I. Sutskever, \"Improving language understanding by generative pre-training,\" URL https://s3-us-west-2. amazonaws. com/openai-assets/researchcovers/languageunsupervised/language understanding paper. pdf, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Vaswani et al., \"Attention is all you need,\" in Advances in neural information processing systems, 2017, pp. 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Pre-Training with Whole Word Masking for Chinese BERT", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Cui et al., \"Pre-Training with Whole Word Masking for Chinese BERT,\" arXiv preprint arXiv:1906.08101, 2019.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Hochreiter and J. Schmidhuber, \"Long short-term memory,\" Neural computation, vol. 9, no. 8, pp. 1735-1780, 1997.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |