|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:26:50.870923Z" |
|
}, |
|
"title": "Using Transfer Learning to Improve Deep Neural Networks for Lyrics Emotion Recognition in Chinese", |
|
"authors": [ |
|
|
{ |
|
"first": "Jia-Yi", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Taichung University of Science and Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ya-Hsuan", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Taichung University of Science and Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kuan-Cheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Taichung University of Science and Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jia-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Taichung University of Science and Technology", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Emotion is an important attribute in music information retrieval. Deep learning methods have been widely used in the automatic recognition of music emotion. Most of the studies focus on the audio data, the role of lyrics in music emotion classification remains under-appreciated. Due to the richness of English language resources, most previous studies were based on English lyrics but rarely in Chinese. This study proposes an approach without specific training for the Chinese lyrics", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Emotion is an important attribute in music information retrieval. Deep learning methods have been widely used in the automatic recognition of music emotion. Most of the studies focus on the audio data, the role of lyrics in music emotion classification remains under-appreciated. Due to the richness of English language resources, most previous studies were based on English lyrics but rarely in Chinese. This study proposes an approach without specific training for the Chinese lyrics", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "\u97f3\u6a02\u548c\u4eba\u985e\u60c5\u7dd2\u76f8\u4e92\u5f71\u97ff\uff0c\u5728\u751f\u6d3b\u4e2d\u626e\u6f14\u4e0d\u53ef\u6216\u7f3a\u7684\u89d2\u8272\u3002\u97f3\u6a02\u7684\u641c\u5c0b\u901a\u5e38\u4ee5\u6b4c\u66f2\u6a19\u984c\u3001 \u8a5e\u66f2\u4f5c\u8005\u3001\u6f14\u5531\u8005\u548c\u6f14\u594f\u6d41\u6d3e\u9032\u884c\u6aa2\u7d22\uff0c\u7136\u800c\uff0c\u60c5\u7dd2\u53ef\u4ee5\u4f5c\u70ba\u97f3\u6a02\u7684\u4e00\u500b\u65b0\u4e14\u91cd\u8981\u7684\u641c \u5c0b\u5c6c\u6027\u3002\u96a8\u8457\u97f3\u6a02\u4e32\u6d41\u5e73\u53f0\u4f7f\u7528\u8005\u548c\u6b4c\u66f2\u5eab\u7684\u7206\u70b8\u5f0f\u589e\u9577\uff0c\u50b3\u7d71\u7684\u7531\u5c08\u5bb6\u9032\u884c\u60c5\u7dd2\u6a19\u8a3b \u5df2\u4e0d\u80fd\u6eff\u8db3\u5be6\u969b\u9700\u6c42\uff0c\u63a8\u85a6\u7cfb\u7d71\u9700\u8981\u66f4\u5feb\u901f\u7684\u6a19\u8a3b\u65b9\u6cd5\uff0c\u81ea\u52d5\u60c5\u7dd2\u8fa8\u8b58\u56e0\u6b64\u6210\u70ba\u91cd\u8981\u7684 \u8b70\u984c\u3002\u97f3\u6a02\u60c5\u7dd2\u8fa8\u8b58(Music Emotion Recognition) \u7528\u65bc\u89c0\u5bdf\u97f3\u6a02\u8207\u4eba\u985e\u60c5\u611f\u4e4b\u76f8\u95dc\u6027\u3001\u5c0d \u97f3\u6a02\u62bd\u53d6\u7279\u5fb5\u4e26\u52a0\u4ee5\u5206\u6790\u627e\u51fa\u97f3\u6a02\u7279\u5fb5\u8207\u4eba\u985e\u5c0d\u65bc\u97f3\u6a02\u60c5\u7dd2\u611f\u77e5\u7684\u95dc\u806f\u3002\u76ee\u524d\u6a5f\u5668\u5b78\u7fd2 \u548c\u6df1\u5ea6\u5b78\u7fd2\u65b9\u6cd5\u5df2\u88ab\u5ee3\u6cdb\u7528\u65bc\u8fa8\u8b58\u97f3\u6a02\u7684\u60c5\u7dd2\u3002 \u652f \u6301 \u5411 \u91cf \u6a5f (Support Vector Machine, SVM) \u548c \u652f \u6301 \u5411 \u91cf \u56de \u6b78 (Support Vector Regression, SVR)\u7b49\u6a5f\u5668\u5b78\u7fd2\u65b9\u6cd5 (Han et al., 2009) \u3002\u57fa\u65bc\u6b4c\u8a5e\u548c\u97f3\u8a0a\u7684\u6b4c\u66f2\u60c5\u7dd2\u6aa2\u6e2c\u65b9\u6cd5 \u4f86\u8a08\u7b97\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u9032\u884c\u97f3\u6a02\u4e4b\u60c5\u7dd2\u5206\u985e (Jamdar et al., 2015) \u3002\u7528\u5377\u7a4d\u795e \u7d93\u7db2\u8def\u9810\u8a13\u7df4\u6a21\u578b\u5c0d\u6bcf 30 \u79d2\u526a\u8f2f\u7684\u5370\u5ea6\u53e4\u5178\u97f3\u6a02\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u5206\u985e (Sarkar et al., 2021) \u3002 \u4e0a\u8ff0\u7814\u7a76\u5927\u591a\u90fd\u96c6\u4e2d\u5229\u7528\u8072\u5b78\u7279\u5fb5\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u8fa8\u8b58\u4e26\u7121\u8a0e\u8ad6\u6b4c\u8a5e\u5c0d\u65bc\u60c5\u7dd2\u7684\u5f71\u97ff\u3002\u6b4c \u8a5e\u88ab\u8ce6\u4e88\u60c5\u7dd2\uff0c\u5728\u5f15\u767c\u4eba\u985e\u7684\u60c5\u7dd2\u4ee5\u53ca\u9810\u6e2c\u97f3\u6a02\u60c5\u7dd2\u626e\u6f14\u8457\u91cd\u8981\u7684\u89d2\u8272 (Hu & Downie, 2010 )\u3002\u96d6\u7136\u65cb\u5f8b\u548c\u6b4c\u8a5e\u6703\u540c\u6642\u5c0d\u807d\u773e\u7522\u751f\u5f71\u97ff\uff0c\u4f46\u807d\u773e\u5c0d\u65bc\u6b4c\u8a5e\u5167\u5bb9\u7684\u504f\u597d\u80fd\u9032\u4e00\u6b65\u53cd \u6620\u807d\u773e\u7684\u7279\u5fb5\u548c\u50be\u5411 (Qiu et al., 2019) (Yang et al., 2008) \u3002\u73fe\u6709\u7684\u7814\u7a76\u5927\u591a\u63a1\u7528 Russell (1980 ) \u6240\u63d0\u51fa\u7684\u5fc3\u7406\u5b78\u74b0\u7e5e\u6a21\u578b\u3002Laurier et al. 
(2009 \u7684\u7814\u7a76\u4e2d\u8868\u660e\uff0cRussell \u5fc3\u7406\u5b78\u60c5\u7dd2\u6a21\u578b\u53ef\u4ee5\u7528\u65bc\u60c5\u7dd2\u5206\u6790\u6216\u97f3\u6a02\u60c5\u7dd2\u8fa8\u8b58 \u4efb\u52d9\u3002\u8a72\u7dad\u5ea6\u6a21\u578b\u7684\u5169\u500b\u7dad\u5ea6\u7684\u9023\u7e8c\u6578\u503c\uff0c\u5206\u5225\u70ba\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u3002\u6548\u50f9 (Valence)\u4ee3\u8868\u6240\u6709\u60c5\u7dd2\u9ad4\u9a57\u6240\u56fa\u6709\u7684\u7a4d\u6975\u6216\u6d88\u6975\uff0c\u9ad8\u6548\u50f9(Valence)\u7684\u6b4c\u66f2\u807d\u8d77\u4f86\u66f4\u70ba\u7a4d \u6975\u3001\u5feb\u6a02\uff0c\u4f4e\u6548\u50f9(Valence)\u7684\u6b4c\u66f2\u807d\u8d77\u4f86\u8f03\u6cae\u55aa\u3001\u61a4\u6012\u3002\u559a\u9192(Arousal)\u4ee3\u8868\u60c5\u7dd2\u7684\u6fc0\u52d5\u7a0b \u5ea6\uff0c\u6b4c\u66f2\u7684\u80fd\u91cf(energy)\u80fd\u5c0d\u61c9\u65bc\u559a\u9192(Arousal)\u503c\uff0c\u4ee3\u8868\u6b4c\u66f2\u5f37\u5ea6\uff0c\u80fd\u91cf\u9ad8\u7684\u6b4c\u66f2\u901a\u5e38\u8d8a \u5feb\u901f\u3001\u97ff\u4eae\u548c\u5f37\u70c8 (Kim et al., 2011) Figure 1 . The circumplex model of affect (Russell, 1980) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 385, |
|
"text": "(Han et al., 2009)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 461, |
|
"text": "(Jamdar et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 522, |
|
"text": "(Sarkar et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 596, |
|
"end": 614, |
|
"text": "(Hu & Downie, 2010", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 681, |
|
"text": "(Qiu et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 701, |
|
"text": "(Yang et al., 2008)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 726, |
|
"text": "Russell (1980", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 761, |
|
"text": ") \u6240\u63d0\u51fa\u7684\u5fc3\u7406\u5b78\u74b0\u7e5e\u6a21\u578b\u3002Laurier et al. (2009", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1024, |
|
"text": "(Kim et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1067, |
|
"end": 1082, |
|
"text": "(Russell, 1980)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1025, |
|
"end": 1033, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "\u7dd2\u8ad6 (Introduction)", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "\u3002 \u5716 1. Russell \u63d0\u51fa\u4e4b\u5fc3\u7406\u5b78\u7dad\u5ea6\u60c5\u7dd2\u6a21\u578b [", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u7dd2\u8ad6 (Introduction)", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "] \u5982\u5716 1 \u6240\u793a\uff0c\u60c5\u7dd2\u7531\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u5169\u500b\u7dad\u5ea6\u8868\u793a\uff0c\u60c5\u7dd2\u5e73\u9762\u88ab\u5206\u70ba \u56db\u500b\u8c61\u9650\uff0c\u5275\u5efa\u4e86\u56db\u500b\u60c5\u7dd2\u985e\u5225\u7a7a\u9593\u3002\u5728 \u00c7ano & Morisio (2017a)\u7684\u7814\u7a76\u4e2d\u57fa\u65bc Russell \u7dad \u5ea6\u60c5\u7dd2\u6a21\u578b\u7684\u56db\u500b\u8c61\u9650\u5c07\u60c5\u7dd2\u5206\u70ba\u56db\u985e\u5225(Q1\u3001Q2\u3001Q3\u3001Q4)\uff0c\u5206\u5225\u70ba\u5feb\u6a02\u3001\u61a4\u6012\u3001\u60b2\u50b7 \u548c\u8f15\u9b06\u3002\u56e0\u6b64\uff0c\u672c\u7814\u7a76\u5728\u6b4c\u8a5e\u9a57\u8b49\u7684\u90e8\u5206\u4e5f\u4f9d\u6b64\u65b9\u6cd5\u5c07\u6b4c\u8a5e\u60c5\u7dd2\u5206\u70ba\u56db\u500b\u8c61\u9650\u985e\u5225\u3002", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u7dd2\u8ad6 (Introduction)", |
|
"sec_num": "1." |
|
}, |
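
To make the quadrant convention concrete, the following is a minimal Python sketch (illustrative only; the function name va_to_quadrant is hypothetical, not from the paper) that maps a predicted valence/arousal pair to one of the four classes, using the neutral value 5 as the threshold described in the classification stage later in the paper.

```python
def va_to_quadrant(valence: float, arousal: float) -> str:
    """Map a (valence, arousal) prediction to a Russell-plane quadrant.

    Uses the paper's convention: the neutral value 5 is the threshold on
    both dimensions; Q1 = positive/aroused (happy), Q2 = negative/aroused
    (angry), Q3 = negative/calm (sad), Q4 = positive/calm (relaxed).
    """
    positive = valence > 5  # V label: 1 = positive, 0 = negative
    aroused = arousal > 5   # A label: 1 = aroused, 0 = calm
    if positive:
        return "Q1" if aroused else "Q4"
    return "Q2" if aroused else "Q3"

print(va_to_quadrant(7.3, 8.0))  # Q1: positive and energetic
print(va_to_quadrant(3.0, 3.5))  # Q3: negative and calm
```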
|
{ |
|
"text": "\u904e\u53bb\u6587\u672c\u60c5\u7dd2\u5206\u6790\u662f\u4f7f\u7528\u57fa\u65bc\u7d71\u8a08\u7684\u8a5e\u888b\u6a21\u578b\u548c\u975c\u614b\u7279\u5fb5\u7684\u8a5e\u5411\u91cf\u6a21\u578b\u5c07\u6587\u672c\u8f49\u70ba\u5411\u91cf \u7279\u5fb5 (Barry, 2017; Han et al., 2013) \uff0c\u4f46\u9019\u4e9b\u65b9\u6cd5\u6703\u9047\u5230\u7121\u6cd5\u89e3\u8b80\u591a\u7fa9\u8a5e\u7684\u74f6\u9838\u3002\u6b4c\u8a5e\u88ab\u8996 \u70ba\u662f\u6558\u4e8b\u800c\u975e\u5f7c\u6b64\u7368\u7acb\u7684\u53e5\u5b50\uff0c\u9700\u8981\u6355\u6349\u4e0a\u4e0b\u6587\u7684\u4f9d\u8cf4\u95dc\u4fc2\uff0c\u5728\u6b4c\u8a5e\u7684\u97f3\u6a02\u60c5\u7dd2\u5206\u985e\u4efb \u52d9\u4e0a\uff0c\u82e5\u57fa\u65bc\u50b3\u7d71\u8a5e\u5178\u9032\u884c\u6548\u679c\u6709\u9650 (Hu & Downie, 2010; Hu et al., 2009) \uff0cAbdillah et al.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 55, |
|
"text": "(Barry, 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 73, |
|
"text": "Han et al., 2013)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 174, |
|
"text": "(Hu & Downie, 2010;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 191, |
|
"text": "Hu et al., 2009)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "(2020) \u904b \u7528 \u80fd \u6355 \u6349 \u6642 \u5e8f\u95dc \u4fc2 \u7684 \u96d9 \u5411\u9577 \u77ed \u671f \u8a18 \u61b6 (Long Short-Term Memory \uff0c LSTM)\u5c0d MoodyLyrics \u8cc7\u6599\u96c6(\u00c7ano & Morisio, 2017b)\u9032\u884c\u6b4c\u8a5e\u7684\u60c5\u7dd2\u5206\u985e\uff0c\u4f46\u905e\u6b78\u67b6\u69cb\u96e3\u4ee5\u5177\u5099", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u5e73\u884c\u904b\u7b97\u7684\u80fd\u529b\u3002Transformer (Vaswani et al., 2017) \u5247\u6539\u8b8a\u904e\u53bb\u5e8f\u5217\u7db2\u8def\u7684\u505a\u6cd5\uff0c\u81ea\u6ce8\u610f \u529b\u6a5f\u5236\u85c9\u7531 Scaled dot-product Attention \u8b93\u8cc7\u6599\u5f97\u4ee5\u5e73\u884c\u904b\u7b97\uff0c \u8003\u616e\u8a5e\u5728\u4e0d\u540c\u7a7a\u9593\u6620\u5c04 \u7684\u91cd\u8981\u6027\uff0c\u5141\u8a31 BERT (Devlin et al., 2018) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 42, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 151, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
|
"sec_num": "2.2" |
|
}, |
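
For reference, here is a minimal NumPy sketch of the scaled dot-product self-attention named above (the formula from Vaswani et al., 2017; not the paper's own code):

```python
import numpy as np

def scaled_dot_product_attention(Q, K, V):
    """Scaled dot-product attention from Vaswani et al. (2017).

    Q, K, V: arrays of shape (seq_len, d_k). All token pairs are scored
    in one matrix product, which is what lets a Transformer process the
    whole sequence in parallel rather than step by step like an LSTM.
    """
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)                 # (seq_len, seq_len) similarities
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)  # softmax over the keys
    return weights @ V                              # attention-weighted sum of values

rng = np.random.default_rng(0)
x = rng.normal(size=(6, 64))                        # 6 tokens, 64-dim vectors
print(scaled_dot_product_attention(x, x, x).shape)  # self-attention -> (6, 64)
```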
|
{ |
|
"text": "\u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u7684\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7d50\u679c\uff0c\u5982\u8868 6 \u6240 \u793a\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6a21\u578b\u5728\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6e96\u78ba\u5ea6\u70ba 0.71\uff0c\u6a19\u7c64 Q1 \u548c Q4 \u7684 F1- score \u8f03\u4f4e\uff0c\u5206\u5225\u70ba 0.69 \u548c 0.51\uff0c\u800c Q2 \u548c Q3 \u7684 F1-score \u8f03\u9ad8\uff0c\u5206\u5225\u70ba 0.83 \u548c 0.72\u3002\u672a \u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6a21\u578b\u5728\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6e96\u78ba\u5ea6\u70ba 0.50\uff0c\u540c\u6a23\u662f\u6a19\u7c64 Q1 \u548c Q4 \u7684 F1- score \u8f03\u4f4e\uff0c\u5206\u5225\u70ba 0.41 \u548c 0.29\uff0c\u800c Q2 \u548c Q3 \u7684 F1-score \u8f03\u9ad8\uff0c\u5206\u5225\u70ba 0.64 \u548c 0.55\u3002\u6bd4 \u8f03\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\u4e2d\u6bcf\u4e00\u500b\u60c5\u7dd2\u6a19\u7c64\u7684\u5206 \u985e\u7d50\u679c\u90fd\u512a\u65bc\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6a21\u578b\uff0c\u53ef\u5f97\u77e5\u5230\u5728\u8a13\u7df4\u968e\u6bb5 CVAT \u6a21\u578b\u5b78\u7fd2\u6548\u679c\u8f03\u4f73\u7684\u6a21 \u578b \uff0c \u61c9 \u7528 \u5728 \u6b4c \u8a5e \u7684 \u60c5 \u7dd2 \u5206 \u985e \u4e5f \u80fd \u5f97 \u5230 \u8f03 \u4f73 \u7684 \u7d50 \u679c \uff0c \u8868 \u793a \u7d93 \u9077 \u79fb \u5b78 \u7fd2 \u7684 \u6a21 \u578b \u5728 CVAW+CVAP \u8cc7\u6599\u96c6\u4e2d\u6240\u5b78\u7fd2\u5230\u7684\u4e2d\u6587\u60c5\u7dd2\u7279\u5fb5\uff0c\u6709\u52a9\u65bc\u63d0\u5347\u6a21\u578b\u5728\u6b4c\u8a5e\u6587\u672c\u7684\u60c5\u7dd2\u8fa8 \u8b58\u80fd\u529b\u3002 \u8868 5. \u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u4e4b\u6df7\u80b4\u77e9\u9663\uff1a\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 [", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u57fa\u65bcTransformer\u4e4b\u6a21\u578b (Transformer-based Model)", |
|
"sec_num": "2.2" |
|
}, |
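
The accuracy and per-label F1-scores reported above can be reproduced from predicted and true quadrant labels; below is a minimal scikit-learn sketch, with illustrative label lists rather than the paper's data:

```python
from sklearn.metrics import accuracy_score, classification_report

# Illustrative gold vs. predicted quadrant labels (not the paper's data);
# in the study every lyric receives one of Q1-Q4.
y_true = ["Q1", "Q2", "Q2", "Q3", "Q4", "Q1", "Q3", "Q4"]
y_pred = ["Q1", "Q2", "Q2", "Q3", "Q3", "Q4", "Q3", "Q4"]

print(accuracy_score(y_true, y_pred))  # overall accuracy
print(classification_report(y_true, y_pred, labels=["Q1", "Q2", "Q3", "Q4"]))  # per-label precision/recall/F1
```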
|
{ |
|
"text": "\u5f9e\u55ae\u8f38\u51fa\u6a21\u578b\u67b6\u69cb\u7684\u7d50\u679c\u767c\u73fe\u559a\u9192(Arousal)\u7684\u7279\u5fb5\u8f03\u96e3\u5b78\u7fd2\uff0c\u8a72\u7d50\u679c\u5728\u591a\u500b\u7814\u7a76\u4e2d\u90fd\u6709 \u63d0\u5230 (Malheiro et al., 2016; Yu et al., 2016; \u00c7ano & Morisio, 2017b) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 70, |
|
"text": "(Malheiro et al., 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 71, |
|
"end": 87, |
|
"text": "Yu et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 110, |
|
"text": "\u00c7ano & Morisio, 2017b)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u8a0e\u8ad6 (Discussion)", |
|
"sec_num": "5." |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Emotion classification of song lyrics using bidirectional lstm method with glove word representation weighting", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Abdillah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Asror", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"F A" |
|
], |
|
"last": "Wibowo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "723--729", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdillah, J., Asror, I., & Wibowo, Y. F. A. (2020). Emotion classification of song lyrics using bidirectional lstm method with glove word representation weighting. Jurnal RESTI (Rekayasa Sistem Dan Teknologi Informasi), 4(4), 723-729.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Transformer-based approach towards music emotion recognition from lyrics", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"G R" |
|
], |
|
"last": "Shanker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Alluri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.02051" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agrawal, Y., Shanker, R. G. R., & Alluri, V. (2021). Transformer-based approach towards music emotion recognition from lyrics. arXiv preprint arXiv:2101.02051.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Sentiment Analysis of Online Reviews Using Bag-of-Words and LSTM Approaches", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Barry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "AICS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "272--274", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barry, J. (2017). Sentiment Analysis of Online Reviews Using Bag-of-Words and LSTM Approaches. In AICS, 272-274.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Med3d: Transfer learning for 3d medical image analysis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.00625" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen, S., Ma, K., & Zheng, Y. (2019). Med3d: Transfer learning for 3d medical image analysis. arXiv preprint arXiv:1904.00625.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devlin, J., Chang, M. W., Lee, K., & Toutanova, K. (2018). Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Transfer learning for time series classification. arXiv e-prints", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Fawaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Forestier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Idoumghar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Muller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.01533" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fawaz, H. I., Forestier, G., Weber, J., Idoumghar, L., & Muller, P. A. (2018). Transfer learning for time series classification. arXiv e-prints, arXiv:1811 .01533.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "SMERS: Music Emotion Recognition Using Support Vector Regression", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Dannenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of 10 th International Society for Music Information Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "651--656", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han, B. J., Rho, S., Dannenberg, R. B., & Hwang, E. (2009). SMERS: Music Emotion Recognition Using Support Vector Regression. In Proceedings of 10 th International Society for Music Information Retrieval Conference (ISMIR 2009), 651-656.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Codex: Combining an svm classifier and character ngram language models for sentiment analysis on twitter text", |
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schuetze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "520--524", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han, Q., Guo, J., & Schuetze, H. (2013). Codex: Combining an svm classifier and character n- gram language models for sentiment analysis on twitter text. In Second Joint Conference on Lexical and Computational Semantics (* SEM), Volume 2: Proceedings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), 520-524.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "When Lyrics Outperform Audio for Music Mood Classification: A Feature Analysis", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Downie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of 11 th International Society for Music Information Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "619--624", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu, X., & Downie, J. S. (2010). When Lyrics Outperform Audio for Music Mood Classification: A Feature Analysis. In Proceedings of 11 th International Society for Music Information Retrieval Conference (ISMIR 2010), 619-624.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Lyric-based Song Emotion Detection with Affective Lexicon and Fuzzy Clustering Method", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of 10 th International Society for Music Information Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "123--128", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu, Y., Chen, X., & Yang, D. (2009). Lyric-based Song Emotion Detection with Affective Lexicon and Fuzzy Clustering Method. In Proceedings of 10 th International Society for Music Information Retrieval Conference (ISMIR 2009), 123-128.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Recognizing learning emotion based on convolutional neural networks and transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Hung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Lai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Applied Soft Computing", |
|
"volume": "84", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.asoc.2019.105724" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hung, J. C., Lin, K. C., & Lai, N. X. (2019). Recognizing learning emotion based on convolutional neural networks and transfer learning. Applied Soft Computing, 84, 105724. https://doi.org/10.1016/j.asoc.2019.105724", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Multi-level transfer learning for improving the performance of deep neural networks: Theory and practice from the tasks of facial emotion recognition and named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Hung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Applied Soft Computing", |
|
"volume": "109", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.asoc.2021.107491" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hung, J. C., & Chang, J. W. (2021). Multi-level transfer learning for improving the performance of deep neural networks: Theory and practice from the tasks of facial emotion recognition and named entity recognition. Applied Soft Computing, 109, 107491. https://doi.org/10.1016/j.asoc.2021.107491", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Emotion analysis of songs based on lyrical and audio features", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Jamdar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Abraham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Khanna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Dubey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.05012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jamdar, A., Abraham, J., Khanna, K., & Dubey, R. (2015). Emotion analysis of songs based on lyrical and audio features. arXiv preprint arXiv:1506.05012.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Music mood classification model based on arousal-valence values", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Yoo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of 13 th International Conference on Advanced Communication Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "292--295", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim, J., Lee, S., Kim, S., & Yoo, W. Y. (2011). Music mood classification model based on arousal-valence values. In Proceedings of 13 th International Conference on Advanced Communication Technology (ICACT 2011), 292-295.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Music Mood Representations from Social Tags", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Laurier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sordo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Serra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Herrera", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of 10 th International Society for Music Information Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "381--386", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurier, C., Sordo, M., Serra, J., & Herrera, P. (2009). Music Mood Representations from Social Tags. In Proceedings of 10 th International Society for Music Information Retrieval Conference (ISMIR 2009), 381-386.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Emotionally-relevant features for classification and regression of music lyrics", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Malheiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Panda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Gomes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Paiva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE Transactions on Affective Computing", |
|
"volume": "9", |
|
"issue": "2", |
|
"pages": "240--254", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TAFFC.2016.2598569" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Malheiro, R., Panda, R., Gomes, P., & Paiva, R. P. (2016). Emotionally-relevant features for classification and regression of music lyrics. IEEE Transactions on Affective Computing, 9(2), 240-254. https://doi.org/10.1109/TAFFC.2016.2598569", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Personality predicts words in favorite songs", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ramsay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of Research in Personality", |
|
"volume": "78", |
|
"issue": "", |
|
"pages": "25--35", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.jrp.2018.11.004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qiu, L., Chen, J., Ramsay, J., & Lu, J. (2019). Personality predicts words in favorite songs. Journal of Research in Personality, 78, 25-35. https://doi.org/10.1016/j.jrp.2018.11.004", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A circumplex model of affect", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Russell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "Journal of personality and social psychology", |
|
"volume": "39", |
|
"issue": "6", |
|
"pages": "1161--1178", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1037/h0077714" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Russell, J. A. (1980). A circumplex model of affect. Journal of personality and social psychology, 39(6), 1161-1178. https://doi.org/10.1037/h0077714", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Neural Network architectures to classify emotions in Indian Classical Music", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2102.00616" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neural Network architectures to classify emotions in Indian Classical Music. arXiv preprint arXiv:2102.00616.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 31st International Conference on neural information processing systems (NIPS '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., & Polosukhin, I. (2017). Attention is all you need. In Proceedings of the 31st International Conference on neural information processing systems (NIPS '17), 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A regression approach to music emotion recognition", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEEE Transactions on audio, speech, and language processing", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "448--457", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TASL.2007.911513" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, Y. H., Lin, Y. C., Su, Y. F., & Chen, H. H. (2008). A regression approach to music emotion recognition. IEEE Transactions on audio, speech, and language processing, 16(2), 448-457. https://doi.org/10.1109/TASL.2007.911513", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Building Chinese affective resources in valence-arousal dimensions", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational \u5ed6\u5bb6\u8abc \u7b49 Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "540--545", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1066" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu, L. C., Lee, L. H., Hao, S., Wang, J., He, Y., Hu, J., Lai, K. R., & Zhang, X. (2016). Building Chinese affective resources in valence-arousal dimensions. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational \u5ed6\u5bb6\u8abc \u7b49 Linguistics: Human Language Technologies, 540-545. https://doi.org/10.18653/v1/N16- 1066", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "IJCNLP-2017 Task 2: Dimensional Sentiment Analysis for Chinese Phrases", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Wong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IJCNLP 2017, Shared Tasks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu, L. C., Lee, L. H., Wang, J., & Wong, K. F. (2017). IJCNLP-2017 Task 2: Dimensional Sentiment Analysis for Chinese Phrases. In Proceedings of the IJCNLP 2017, Shared Tasks, 9-16.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Music mood dataset creation based on last. fm tags", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "\u00c7ano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Morisio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of Fourth International Conference on Artificial Intelligence and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "15--26", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5121/csit.2017.70603" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "\u00c7ano, E., & Morisio, M. (2017a). Music mood dataset creation based on last. fm tags. In Proceedings of Fourth International Conference on Artificial Intelligence and Applications (AIAP 2017), 15-26. https://doi.org/10.5121/csit.2017.70603", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Moodylyrics: A sentiment annotated lyrics dataset", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "\u00c7ano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Morisio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 International Conference on Intelligent Systems, Metaheuristics & Swarm Intelligence (ISMSI '17)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--124", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3059336.3059340" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "\u00c7ano, E., & Morisio, M. (2017b). Moodylyrics: A sentiment annotated lyrics dataset. In Proceedings of the 2017 International Conference on Intelligent Systems, Metaheuristics & Swarm Intelligence (ISMSI '17), 118-124. http://dx.doi.org/10.1145/3059336.3059340", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "\u3002Agrawal et al. (2021)\u63d0\u51fa\u6b4c\u8a5e\u53ef\u8996\u70ba\u4e00\u9023\u4e32\u5f7c\u6b64 \u76f8\u95dc\u7684\u53e5\u5b50\uff0c\u9700\u6355\u6349\u4e0a\u4e0b\u6587\u548c\u9577\u671f\u4f9d\u8cf4\u7684\u95dc\u4fc2\uff0c\u4e26\u5728\u7814\u7a76\u4f7f\u7528 Transformer-based \u7684\u6a21\u578b \u9032\u884c\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58\uff0c\u5728\u591a\u500b\u82f1\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8cc7\u6599\u96c6\u4e0a\u53d6\u5f97\u826f\u597d\u7684\u6210\u679c\uff0c\u4e0a\u8ff0\u7684\u82f1\u6587\u6b4c\u8a5e\u8cc7 \u6599\u96c6\u7686\u57fa\u65bc Russell (1980)\u7684 Valence-Arousal \u5fc3\u7406\u5b78\u74b0\u7e5e\u60c5\u611f\u6a21\u578b\u9032\u884c\u97f3\u6a02\u60c5\u7dd2\u7684\u6a19\u8a3b\u3002", |
|
"content": "<table><tr><td>2</td></tr><tr><td>\u672c\u7814\u7a76\u63d0\u51fa\u4e00\u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u65b9\u6cd5\u3002\u9996\u5148\uff0c\u904b\u7528\u57fa\u65bc Transformer \u8a9e\u8a00\u9810\u8a13\u7df4\u6a21\u578b</td></tr><tr><td>\u5c0d\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u5b57\u5178(CVAW)\u8207\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e(CVAP)\u9032\u884c\u5efa\u6a21\uff0c\u5176\u6b21\u5c07\u6a21\u578b\u9077\u79fb\u81f3</td></tr><tr><td>\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8a9e\u6599(CVAT)\uff0c\u6700\u5f8c\u5c07\u6a21\u578b\u76f4\u63a5\u7528\u65bc\u7121\u6a19\u8a3b\u7684\u6b4c\u8a5e\u6587\u672c\u9032\u884c\u60c5\u7dd2\u7684\u81ea\u52d5\u6a19\u8a3b\u3002</td></tr><tr><td>\u672c\u7814\u7a76\u5176\u9918\u7ae0\u7bc0\u7684\u7d44\u7e54\u5982\u4e0b\uff1a\u7b2c\u4e8c\u7bc0\u56de\u9867\u4e86\u5fc3\u7406\u5b78\u7dad\u5ea6\u60c5\u7dd2\u6a21\u578b\u3001\u57fa\u65bc Transformer</td></tr><tr><td>\u4e4b\u6a21\u578b\u548c\u9077\u79fb\u5b78\u7fd2\u7684\u76f8\u95dc\u6587\u737b\u3002\u7b2c\u4e09\u7bc0\u7684\u65b9\u6cd5\u8ad6\u8aaa\u660e\u4e86\u672c\u7814\u7a76\u6240\u4f7f\u7528\u7684\u5169\u500b\u8cc7\u6599\u96c6\u3001\u6587</td></tr><tr><td>\u672c\u9810\u8655\u7406\u4e26\u89e3\u91cb\u672c\u7814\u7a76\u63d0\u51fa\u7684\u67b6\u69cb\u3002\u7b2c\u56db\u7bc0\u70ba\u672c\u7814\u7a76\u6a21\u578b\u8a13\u7df4\u548c\u6b4c\u8a5e\u9a57\u8b49\u7684\u7d50\u679c\u3002\u7b2c\u4e94</td></tr><tr><td>\u7bc0\u5c0d\u5be6\u9a57\u7d50\u679c\u9032\u884c\u76f8\u95dc\u8a0e\u8ad6\u3002\u6700\u5f8c\uff0c\u5728\u7b2c\u516d\u7bc0\u7e3d\u7d50\u672c\u7814\u7a76\u7684\u6210\u679c\u548c\u672a\u4f86\u6539\u9032\u65b9\u5411\u3002</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "\u3002Q1 \u4ee3\u8868\u6b63\u5411\u6fc0\u6602\u5171 43 \u9996\uff0cQ2 \u4ee3\u8868\u8ca0\u5411\u6fc0\u6602\u5171 45 \u9996\uff0c Q3 \u4ee3\u8868\u8ca0\u5411\u5e73\u975c\u5171 43 \u9996\uff0cQ4 \u4ee3\u8868\u6b63\u5411\u5e73\u975c\u5171 39 \u9996\u3002V \u548c A \u5206\u5225\u4ee3\u8868\u6548\u50f9(Valence) \u548c\u559a\u9192(Arousal)\uff0cV \u6a19\u8a18 1 \u4ee3\u8868\u6b63\u5411\u60c5\u7dd2\u30010 \u4ee3\u8868\u8ca0\u5411\u60c5\u7dd2\uff0cA \u6a19\u8a18\u70ba 1 \u4ee3\u8868\u6fc0\u6602 \u60c5\u7dd2\u30010 \u4ee3\u8868\u5e73\u975c\u60c5\u7dd2\u3002 \u8868 \u6240\u793a\u5728\u591a\u8f38\u51fa\u67b6\u69cb\u5e95\u4e0b\uff0c\u5f9e\u96f6\u8a13\u7df4 CVAT \u8cc7\u6599\u96c6(Training From Scratch)\u548c\u5f9e CVAP+CVAW \u8cc7\u6599\u96c6\u6a21\u578b\u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6(Transfer Learning)\u7684\u7d50\u679c\u4f86\u770b\uff0c\u5169\u8005\u540c \u6a23\u90fd\u5728\u5b78\u7fd2\u901f\u7387\u7686\u70ba 1e-05 \u7684\u8a13\u7df4\u6548\u679c\u6700\u4f73\uff0c\u7d93\u904e\u9077\u79fb\u7684\u5747\u65b9\u8aa4\u5dee\u70ba 0.65696 \u512a\u65bc\u672a\u7d93 \u9077\u79fb\u7684 0.72025\u3002\u7d93\u904e\u9077\u79fb\u7684\u6a21\u578b\u7d50\u679c\u6bd4\u672a\u7d93\u9077\u79fb\u6548\u679c\u597d\uff0c\u5728\u4e0d\u540c\u5b78\u7fd2\u901f\u7387\u4e0b\uff0c\u7d93\u9077\u79fb\u7684 CVAT \u5728\u4e0d\u540c\u7684\u5b78\u7fd2\u901f\u7387\u4e0b\u90fd\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u7d50\u679c\uff0c\u6709\u7d93\u904e\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u6536\u6582\u901f\u5ea6 \u4e5f\u6bd4\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u5feb\u3002\u8868 4 \u70ba\u5728\u55ae\u8f38\u51fa\u67b6\u69cb\u5e95\u4e0b\uff0c\u5f9e\u96f6\u8a13\u7df4 CVAT \u8cc7\u6599\u96c6(Training From Scratch)\u548c\u5f9e CVAP+CVAW \u8cc7\u6599\u96c6\u6a21\u578b\u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6(Transfer Learning)\u7684", |
|
"content": "<table><tr><td>\u5ed6\u5bb6\u8abc \u7b49 \u5ed6\u5bb6\u8abc \u7b49 \u5ed6\u5bb6\u8abc \u7b49</td></tr><tr><td>\u9810\u8a13\u7df4\u6a21\u578b\u5728\u591a\u9805\u4efb\u52d9\u4e2d\u53d6\u5f97\u7a81\u7834\uff0c\u5305\u542b\u5be6\u9ad4 \u8faf\u8b58\u3001\u5e8f\u5217\u6216\u53e5\u5b50\u5c0d\u5206\u985e\u3001\u554f\u7b54\u7b49 11 \u7a2e\u4efb\u52d9\uff0c\u4f7f\u5f97 Transformer-based \u7684\u6a21\u578b\u67b6\u69cb\u5728\u81ea\u7136 \u8a9e\u8a00\u9818\u57df\u4e2d\u6210\u70ba\u4e3b\u6d41\u3002\u5728\u6b4c\u8a5e\u60c5\u7dd2\u8fa8\u8b58\u7684\u61c9\u7528\u4e0a\uff0cAgrawal et al. (2021)\u7684\u7814\u7a76\u4fbf\u662f\u4f7f\u7528\u57fa \u65bc Transformer \u7684\u8a9e\u8a00\u6a21\u578b\u4f5c\u70ba\u60c5\u7dd2\u5206\u985e\u7684\u57fa\u790e\u67b6\u69cb\uff0c\u5728\u591a\u500b\u82f1\u6587\u6b4c\u8a5e\u60c5\u7dd2\u8cc7\u6599\u96c6\u4e0a\u9054\u5230 \u826f\u597d\u7684\u6210\u679c\uff0c\u5c55\u793a Transformer-based \u65b9\u6cd5\u7684\u9ad8\u6548\u80fd\u3002 \u5728\u67d0\u4e9b\u9818\u57df\u4e2d\u6a19\u7c64\u7684\u6a19\u8a18\u6602\u8cb4\uff0c\u82e5\u539f\u59cb\u8cc7\u6599\u4e2d\u542b\u6709\u6a19\u7c64\u7684\u6578\u91cf\u592a\u5c11\uff0c\u5bb9\u6613\u9020\u6210\u6a21\u578b\u904e\u5ea6 \u64ec\u5408\u3002\u9077\u79fb\u5b78\u7fd2\u5e38\u7528\u7684\u5169\u500b\u6280\u5de7\uff1a\u7279\u5fb5\u8403\u53d6\u548c\u5fae\u8abf\u3002\u9077\u79fb\u5b78\u7fd2\u7684\u6709\u6548\u6027\u50ac\u751f\u4e86\u591a\u7a2e\u61c9\u7528\uff0c \u4f8b\u5982\uff1a\u5b78\u7fd2\u60c5\u7dd2\u8faf\u8b58(Hung et al., 2019)\u3001\u6642\u9593\u5e8f\u5217\u4efb\u52d9(Fawaz et al., 2018)\u30013D \u91ab\u5b78\u5f71\u50cf \uf0b7 \u6b4c\u8a5e\u8cc7\u6599\u96c6\uff1a\u70ba\u672c\u7814\u7a76\u81ea\u884c\u6536\u96c6\u4e26\u6a19\u7c64\u7684\u8cc7\u6599\u96c6\u3002\u6a19\u7c64\u5305\u542b\u8c61\u9650\u4e00(Q1) \u3001\u8c61\u9650\u4e8c(Q2)\u3001 \u8c61\u9650\u4e09(Q3)\u53ca\u8c61\u9650\u56db(Q4)\u540d\u7a31 \u7e3d\u6578 \u7bc4\u4f8b\u6587\u5b57 Valence Arousal \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u5b57\u5178 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e CVAP 2,998 \u975e\u5e38\u53ef\u611b 8 7.313 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8a9e\u6599 \u5eab CVAT 2,009 \u9019\u7a2e\u8a18\u9304\u96e3\u514d\u7a7a\u6d1e\uff0c\u865b\u69cb\u4e5f\u986f\u5f97\u8584\u5f31\u3002 3 3.5 \u9810\u8a13\u7df4\u6a21\u578b\u52a0\u4e0a\u4e00\u5c64 Dropout \u548c\u4e00\u5c64\u7dda\u6027\u5206\u985e\u5c64\uff0c\u512a\u5316\u5668\u70ba Adam\uff0c\u5b78\u7fd2\u901f\u7387\u672c\u7814\u7a76\u5617\u8a66 \u4e5f\u5728\u5be6\u9a57\u4e2d\u767c\u73fe\uff0c\u82e5\u5b78\u7fd2\u901f\u7387\u4e0d\u5920\u5c0f\u6703\u5c0e\u81f4\u640d\u5931(loss)\u7121\u6cd5\u964d\u4f4e\uff0c\u6700\u5f8c\u9078\u64c7\u4e86 1e-05\u30011e-06 \u548c 5e-05 \u4e09\u500b\u8d85\u53c3\u6578\u9032\u884c\u8fd1\u4e00\u6b65\u5be6\u9a57\u53ca\u6bd4\u8f03\uff0c\u6bcf\u6b21\u8a13\u7df4\u6700\u5927 Epoch \u8a2d\u5b9a\u70ba 100\uff0c\u52a0\u5165 Early Stopping \u7684\u6a5f\u5236\uff0c\u5c07\u8010\u5fc3(Patience)\u8a2d\u81f3\u70ba 10\u3002 \u679c\u3002 \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch Arousal 1e-6 0.88142 7 5e-5 0.93479 11 [\u65b9\u6cd5 \u679c Transfer Learning 1e-5 0.84259 1 \u8868 3. 
\u591a\u8f38\u51fa\u67b6\u69cb(Multi-output)\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u5728 CVAP \u8cc7\u6599\u96c6\u4e4b\u7d50 \u7d93\u9077\u79fb\u5b78\u7fd2 5e-5 0.53422 5 \u7387\u70ba 1e-05 \u7684\u6642\u5019\u5f97\u5230\u6700\u4f73\u7d50\u679c\u3002 \u8b49\u6a21\u578b\u9810\u6e2c\u6b4c\u8a5e\u60c5\u7dd2\u7684\u6210\u6548\uff0c\u6bcf\u500b\u6bb5\u843d\u5305\u542b\u5728\u4e0d\u540c\u7684\u6a21\u578b\u67b6\u69cb\u548c\u4e0d\u540c\u8a13\u7df4\u65b9\u5f0f\u7684\u5be6\u9a57\u7d50 Valence 1e-6 0.46624 15 \u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u5176\u5747\u65b9\u8aa4\u5dee\u70ba 0.84259 \u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684 0.87107\uff0c\u5169\u8005\u540c\u6a23\u90fd\u5728\u5b78\u7fd2\u901f \u672c\u7ae0\u7bc0\u5c07\u5be6\u9a57\u7d50\u679c\u5206\u70ba\u5169\u500b\u968e\u6bb5\uff0c\u7b2c\u4e00\u968e\u6bb5\u662f\u4e2d\u6587\u60c5\u7dd2\u7684\u6a21\u578b\u8a13\u7df4\u7d50\u679c\uff0c\u7b2c\u4e8c\u968e\u6bb5\u662f\u9a57 0.47898 \u4f9d\u7136\u662f\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u5747\u65b9\u8aa4\u5dee 0.50338\u3002\u6bd4\u8f03\u8f38\u51fa\u70ba\u559a\u9192(Arousal)\u7684\u7d50\u679c\uff0c\u7d93 1e-5 0.47898 4 \u591a\u7a2e\u5b78\u7fd2\u901f\u7387\u9032\u884c\u5be6\u9a57\uff0c\u5fae\u8abf\u6a21\u578b\u9069\u5408\u8f03\u5c0f\u7684\u5b78\u7fd2\u901f\u7387\u907f\u514d\u9810\u8a13\u7df4\u7684\u6b0a\u91cd\u88ab\u4fee\u6539\u7834\u58de\uff0c 4. \u5be6\u9a57\u7d50\u679c (Experimental Result) \u7684\u6700\u4f73\u5b78\u7fd2\u901f\u7387\u70ba 1e-5\uff0c\u5c31\u7b97\u540c\u6a23\u90fd\u5728 1e-5 \u7684\u5b78\u7fd2\u901f\u7387\u5e95\u4e0b\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u5747\u65b9\u8aa4\u5dee 5e-5 0.9303 10 \u7684 CVAT \u5176\u7d50\u679c\u512a\u65bc\u672a\u7d93\u9077\u79fb\u7684\u7d50\u679c\u3002\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u6700\u4f73\u5b78\u7fd2\u901f\u7387\u70ba 1e-06\uff0c\u672a\u7d93\u9077\u79fb CVAW 5,512 \u4e0d\u723d 2.8 \u8981\u91cd\u65b0\u5b78\u7fd2\uff0c\u5373\u4f7f\u53ea\u6709\u5c11\u91cf\u8a13\u7df4\u6a23\u672c\u4e5f\u80fd\u9054\u5230\u826f\u597d\u7684\u6548\u679c\u3002\u5728\u6a21\u578b\u5fae\u8abf\u65b9\u9762\uff0c\u5728 BERT \u4e4b\u5f8c\u7684\u7d50\u679c\u8f49\u70ba\u56db\u500b\u8c61\u9650 Q1\u3001Q2\u3001Q3 \u548c Q4 \u7684\u60c5\u7dd2\u5206\u985e\u4e4b\u7d50\u679c\uff0c\u6700\u5f8c\u9a57\u8b49\u5176\u5206\u985e\u6548\u679c\u3002 0.50338\uff0c\u800c\u7d93\u9077\u79fb\u5b78\u7fd2(Transfer Learning)\u7684\u5747\u65b9\u8aa4\u5dee(MSE)\u70ba 0.46624\uff0c\u986f\u793a\u7d93\u9077\u79fb\u5b78\u7fd2 Arousal 1e-6 0.93317 28 7.2 3.2.2 \u5be6\u65bd\u7d30\u7bc0 (Implementation Details) \u672c\u7814\u7a76\u4ee5\u77e5\u540d\u7684\u6df1\u5ea6\u795e\u7d93\u7db2\u8def\u6a21\u578b\u2500BERT(Devlin et al., 2018)\u70ba\u57fa\u790e\u67b6\u69cb\uff0c\u4e26\u9032\u4e00\u6b65\u63d0 \u51fa\u4e86\u591a\u8f38\u51fa(Multi-output)\u8207\u55ae\u8f38\u51fa(Single-output)\u5169\u7a2e\u6a21\u578b\u8a13\u7df4\u67b6\u69cb\u3002\u5982\u5716 2 \u6240\u793a\uff0c\u591a\u8f38 \u51fa(Multi-output)\u67b6\u69cb\u70ba\u4e00\u500b BERT \u6a21\u578b\u5171\u4eab\u6b0a\u91cd\uff0c\u4e00\u6b21\u8f38\u51fa\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) \u5169\u500b\u9023\u7e8c\u503c\uff0c\u55ae\u8f38\u51fa(Single-output)\u70ba\u4e00\u500b BERT \u6a21\u578b\u8f38\u51fa\u55ae\u4e00\u500b\u503c(\u4f8b\u5982 Valence)\u3002\u7531\u65bc \u662f\u6548\u50f9(Valence)\u3001\u559a\u9192(Arousal)\u7684\u6578\u503c\u9810\u6e2c\uff0c\u56e0\u6b64\u6a21\u578b\u8a13\u7df4\u6642\u7684\u640d\u5931\u51fd\u6578\u9078\u64c7\u4f7f\u7528\u5747\u65b9 \u8aa4\u5dee(Mean square error, 
MSE)\u3002\u5169\u7a2e\u6a21\u578b\u67b6\u69cb\u90fd\u5be6\u9a57\u5169\u7a2e\u65b9\u6cd5\uff1a(a)\u4f7f\u7528\u5f9e CVAW+CVAP \u9077\u79fb\u81f3 CVAT \u8cc7\u6599\u96c6\u7684\u9077\u79fb\u5b78\u7fd2\u65b9\u6cd5\u3002(b)\u5f9e\u96f6\u8a13\u7df4 CVAT \u672a\u9077\u79fb\u7684\u65b9\u6cd5\u3002\u6700\u5f8c\u9032\u884c\u5169\u7a2e \u65b9\u6cd5\u7684\u6bd4\u8f03\u3002\u672c\u7814\u7a76\u57fa\u65bc\u5fae\u8abf\u65b9\u6cd5\u9032\u884c\u5be6\u9a57\uff0c\u5fae\u8abf\u65b9\u6cd5\u7684\u512a\u9ede\u5728\u65bc\u6a21\u578b\u7684\u8a31\u591a\u53c3\u6578\u4e0d\u9700 3.2.3 \u6b4c\u8a5e\u60c5\u7dd2\u4e4b\u5206\u985e (Lyrics Emotion Classification) \u6b64\u968e\u6bb5\u7684\u76ee\u7684\u5728\u65bc\u9a57\u8b49\u672c\u7814\u7a76\u63d0\u51fa\u7684\u65b9\u6cd5\u80fd\u5728\u672a\u5b78\u7fd2\u904e\u6b4c\u8a5e\u6587\u672c\u7684\u60c5\u6cc1\u4e0b\uff0c\u80fd\u5c0d\u65bc\u6b4c\u8a5e \u6587\u672c\u9032\u884c\u60c5\u7dd2\u7684\u6a19\u8a3b\u3002\u9996\u5148\uff0c\u5c07\u6b4c\u8a5e\u6587\u672c\u9032\u884c\u8207\u7b2c\u4e00\u5c0f\u7bc0\u540c\u6a23\u7684\u9810\u8655\u7406\u5f8c\u9001\u5165\u6a21\u578b\u9032\u884c \u9810\u6e2c\uff0c\u8f38\u51fa\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\uff0c\u5176\u7bc4\u570d\u70ba 0 \u5230 9\u3002\u672c\u7814\u7a76\u4f9d\u7167\u539f\u8cc7\u6599\u96c6\u7684\u6558\u8ff0 (Yu et al., 2016; Yu et al., 2017)\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u90fd\u4ee5\u4e2d\u6027\u503c 5 \u70ba\u95be\u503c\uff0c\u56e0 \u6b64\uff0c\u82e5\u9810\u6e2c\u51fa\u7684\u6548\u50f9(Valence)\u6578\u503c\u5927\u65bc 5 \u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u6b63\u5411\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 1\u3001 \u6548\u50f9(Valence)\u6578\u503c\u5c0f\u65bc 5 \u5247\u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u8ca0\u5411\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 0\uff0c\u82e5\u9810\u6e2c\u559a\u9192 (Arousal)\u6578\u503c\u5927\u65bc 5 \u5247\u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u6fc0\u52d5\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 1\u3001\u559a\u9192(Arousal)\u503c\u5c0f\u65bc 5 \u8868\u793a\u6a21\u578b\u9810\u6e2c\u8a72\u6b4c\u8a5e\u70ba\u5e73\u975c\u60c5\u7dd2\u4e26\u6a19\u8a18\u70ba 0\u3002\u6211\u5011\u5c07\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u6a19\u8a18 \u9996\u5148\u6bd4\u8f03\u6548\u50f9(Valence)\u8f38\u51fa\u7684\u7d50\u679c\uff0c\u672a\u7d93\u9077\u79fb(Training From Scratch)\u7684\u5747\u65b9\u8aa4\u5dee(MSE)\u70ba \u5f9e 0 \u8a13\u7df4 CVAT 1e-5 0.87107 5 From Scratch Valence 1e-5 0.50338 12 1e-6 0.51199 44 5e-5 0.55236 6 \u5982\u8868 3 \u7d50\u679c\uff0c\u55ae\u8f38\u51fa(Single-output)\u67b6\u69cb\u662f\u5c07\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u4f5c\u70ba\u7368\u7acb\u7684\u5169\u500b\u8f38\u51fa\uff0c \u8868 4. \u55ae\u8f38\u51fa\u67b6\u69cb(Single-output)\u7d93\u9077\u79fb\u5b78\u7fd2\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2\u5728 CVAP \u8cc7\u6599\u96c6\u4e4b\u7d50 \u672a\u7d93\u9077\u79fb\u7684 CVAT \u5206\u985e\u7d50\u679c\u4e2d\uff0cQ1 \u6709 14%\u88ab\u932f\u5206\u6210 Q2\uff0c25.6%\u88ab\u932f\u5206\u6210 Q3\uff0c35%\u88ab\u932f \u679c \u5206\u6210 Q4\uff0cQ2 \u53ea\u6709 62%\u5206\u985e\u6b63\u78ba\uff0c\u5176\u9918 37%\u7686\u88ab\u932f\u5206\u6210 Q3\uff0cQ3 \u6709 14%\u88ab\u932f\u5206\u6210 Q2\uff0c\u5176 [Table 4. 
CVAP results on the single-output model with/without transfer learning] \u65b9\u6cd5 \u8f38\u51fa \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch \u9918\u5206\u985e\u6b63\u78ba\uff0cQ4 \u6709 7.7%\u88ab\u932f\u5206\u6210 Q2\uff0c69%\u5bb9\u6613\u88ab\u932f\u5206\u6210 Q3 \uff0c\u53ea\u6709 29%\u5206\u985e\u6b63\u78ba\u3002</td></tr><tr><td>\u5206\u6790(Chen et al., 2019)\u3002\u5728\u81ea\u7136\u8a9e\u8a00\u8655\u7406\u9818\u57df\uff0c\u4e5f\u5e38\u904b\u7528\u9077\u79fb\u5b78\u7fd2\u7684\u6280\u5de7\u5c0d\u65bc\u9810\u8a13\u7df4\u6a21\u578b \u9032\u884c\u6a21\u578b\u5fae\u8abf\u6216\u7279\u5fb5\u8403\u53d6\uff0cTransformer-based \u7684\u9810\u8a13\u7df4\u6a21\u578b\uff0c\u8b49\u660e\u5fae\u8abf\u5728\u7121\u8a3b\u91cb\u8a9e\u6599\u4e0a \u9810\u8a13\u7df4\u5927\u898f\u6a21\u8a9e\u8a00\u6a21\u578b\u7684\u6709\u6548\u6027\u3002Hung & Chang (2021)\u5247\u63d0\u5230\u591a\u5c64\u9077\u79fb\u5b78\u7fd2\u7684\u6709\u6548\u6027\uff0c \u8868\u660e\u4e86\u4e0d\u7ba1\u5728\u96fb\u8166\u8996\u89ba\u4efb\u52d9\u6216\u81ea\u7136\u8a9e\u8a00\u8655\u7406\u4efb\u52d9\uff0c\u7d93\u9077\u79fb\u5b78\u7fd2\u7684\u7d50\u679c\u6703\u512a\u65bc\u672a\u7d93\u904e\u9077\u79fb \u7684\u7d50\u679c\uff0c\u56e0\u6b64\uff0c\u672c\u7bc7\u7814\u7a76\u63d0\u51fa\u7684\u6a21\u578b\u67b6\u69cb\u57fa\u65bc\u8a9e\u8a00\u9810\u8a13\u7df4\u6a21\u578b\u5c0d\u6587\u672c\u9032\u884c\u9077\u79fb\u5b78\u7fd2\u3002 \u67b6\u69cb\u540d\u7a31 \u8f38\u51fa \u5b78\u7fd2\u901f\u7387 (Lr) \u640d\u5931 (loss) Epoch \u591a\u8f38\u51fa\u67b6\u69cb Multi-output -1e-5 0.59126 14 1e-6 0.65283 1e-5 0.72025 10 \u5f9e 0 \u8a13\u7df4 CVAT From Scratch 1e-6 0.73979 58 5e-5 0.80925 4.2 \u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u6a21\u578b\u4e4b\u9a57\u8b49 (Verification of Chinese Lyrics Emotion Model) 10 1e-5 0.65696 \u6b64\u6bb5\u843d\u8a0e\u8ad6\u524d\u8ff0\u7684\u4e2d\u6587\u60c5\u7dd2\u6a21\u578b\u61c9\u7528\u65bc\u4e2d\u6587\u6b4c\u8a5e\u5206\u985e\u4e4b\u7d50\u679c\uff0c\u7b2c\u4e00\u5c0f\u7bc0\u63cf\u8ff0\u4e2d\u6587\u6b4c\u8a5e\u7684 3 \u5206\u985e\u7d50\u679c\uff0c\u7b2c\u4e8c\u5c0f\u7bc0\u66f4\u9032\u4e00\u6b65\u8aaa\u660e Valence-Arousal \u5e73\u9762\u7684\u5206\u985e\u7d50\u679c\u3002\u4e2d\u6587\u6b4c\u8a5e\u7684\u60c5\u7dd2\u5206 \u7d93\u9077\u79fb\u5b78\u7fd2 1e-6 0.67836 22 \u985e\u662f\u5c07\u6a21\u578b\u8f38\u51fa\u7684\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u57fa\u65bc\u4e2d\u6027\u503c 5 \u4f5c\u70ba\u95be\u503c\uff0c\u8f49\u63db\u70ba\u5ea7\u6a19\u5e73 Transfer Learning 5e-5 0.70594 \u9762\u4e0a\u7684\u56db\u500b\u8c61\u9650\u985e\u5225(Q1\u3001Q2\u3001Q3\u3001Q4)\u3002\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7d50\u679c\u5982\u8868 8 \u6240\u793a\uff0c\u5305\u542b\u6b4c\u540d\u3001\u6b4c 2 \u8a5e\u3001\u6a21\u578b\u9810\u6e2c\u7684 Valence \u6578\u503c\u548c Arousal \u6578\u503c\u3001\u9810\u6e2c\u7684\u6a19\u7c64\u548c\u771f\u5be6\u6a19\u7c64\u3002 32 \uf0b7 \u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u8cc7\u6599\u96c6(Yu et al., 2016; Yu et al., 2017)\uff1a\u8cc7\u6599\u5982\u8868 1 \u6240\u793a\uff0c\u5305\u542b\u4e2d\u6587\u60c5 \u7dd2\u5b57\u5178(Chinese Valence-Arousal Words, CVAW)\u3001\u4e2d\u6587\u7dad\u5ea6\u60c5\u7dd2\u7247\u8a9e(Chinese Valence-5e-5 0.69301 17 4.2.1 \u4e2d\u6587\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u4e4b\u7d50\u679c (Chinese Lyrics Emotion Classification Results)</td></tr><tr><td>Arousal Phrases, CVAP)\u4ee5\u53ca\u4e2d\u6587\u60c5\u7dd2\u8a9e\u6599\u5eab(Chinese Valence-Arousal Text, CVAT)\u4e09 1e-5 0.3788 24 \u7d93\u9077\u79fb\u5b78\u7fd2 CVAT \u6a21\u578b\u8207\u672a\u7d93\u9077\u79fb\u5b78\u7fd2 
CVAT \u6a21\u578b\u7684\u6b4c\u8a5e\u60c5\u7dd2\u5206\u985e\u7684\u6df7\u6dc6\u77e9\u9663\u7684\u7d50\u679c\uff0c</td></tr><tr><td>\u500b\u3002CVAW \u5305\u542b 5,512 \u500b\u4e2d\u6587\u60c5\u7dd2\u8a5e\uff1bCVAP \u4e2d\u6bcf\u500b\u7247\u8a9e\u7d50\u5408\u4fee\u98fe\u7b26\u548c\u4f86\u81ea CVAW \u4e2d\u7684\u8a5e\uff0c\u5171 2,998 \u500b\u4e2d\u6587\u60c5\u7dd2\u7247\u8a9e\uff1bCVAT \u5247\u5f9e 720 \u7bc7\u4f86\u81ea 6 \u7a2e\u4e0d\u540c\u985e\u5225\u7684\u7db2\u8def\u6587 \u7ae0\u8490\u96c6\u800c\u4f86\uff0c\u5171 2,009 \u500b\u53e5\u5b50\u3002\u4e09\u500b\u8cc7\u6599\u96c6\u7684\u6bcf\u500b\u8a5e\u6216\u53e5\u5b50\u7686\u5305\u542b\u6548\u50f9(Valence)\u548c \u559a\u9192(Arousal)\u3002\u6548\u50f9(Valence)\u7684\u7bc4\u570d\u5f9e 1 \u5230 9 \u5176\u5206\u5225\u4ee3\u8868\u6975\u7aef\u8ca0\u9762\u548c\u6975\u7aef\u6b63\u9762\u7684\u60c5 \u5716 2. \u672c\u7814\u7a76\u63d0\u51fa\u4e4b\u6a21\u578b\u67b6\u69cb\uff1a\u5305\u542b\u5169\u7a2e\u67b6\u69cb\u5206\u5225\u70ba\u55ae\u8f38\u51fa\u8207\u591a\u8f38\u51fa \u55ae\u8f38\u51fa\u67b6\u69cb Valence 1e-6 0.39498 \u5982\u8868 5 \u6240\u793a\uff0c\u5728\u7d93\u9077\u79fb\u5b78\u7fd2\u7684 CVAT \u5206\u985e\uff0cQ1 \u6709 26%\u5bb9\u6613\u88ab\u932f\u5206\u6210 Q2\uff0c19%\u6703\u88ab\u932f\u5206 35 5e-5 0.51918 \u6210 Q4\uff0c\u50c5\u6709 2.3%\u6703\u88ab\u5206\u6210 Q3\uff0c\u4e5f\u5c31\u662f\u5728 Q1 \u7684\u60c5\u7dd2\u985e\u5225\u4e2d\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal) 4 \u90fd\u6709\u88ab\u5206\u985e\u932f\u7684\u53ef\u80fd\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192(Arousal)\u540c\u6642\u88ab\u932f\u5206\u7684\u6a5f\u7387\u50c5 2.3%\u3002Q4 \u6709 [Figure 2. Training architecture of multi-output and single-output models] Single-output 1e-5 0.77339 12 56% \u88ab\u932f\u5206\u70ba Q3\uff0c\u88ab\u5206\u6210 Q2 \u7684\u53ef\u80fd\u70ba 5%\uff0c\u50c5\u6709 2.5%\u6703\u88ab\u5206\u6210 Q1\uff0c\u4e5f\u5c31\u662f\u5728 Q4 \u7684</td></tr><tr><td>\u7dd2\uff0c\u559a\u9192(Arousal)\u7684\u7bc4\u570d\u5f9e 1 \u5230 9 \u5176\u5206\u5225\u4ee3\u8868\u5e73\u975c\u548c\u6fc0\u52d5\uff0c\u6548\u50f9(Valence)\u548c\u559a\u9192 Arousal 1e-6 0.92874 19 \u60c5\u7dd2\u985e\u5225\u4e2d\uff0c\u50c5\u6548\u50f9(Valence)\u5bb9\u6613\u88ab\u5206\u985e\u932f\u8aa4\u3002Q2 \u7684\u60c5\u7dd2\u5e7e\u4e4e\u90fd\u80fd\u5920\u6e96\u78ba\u8fa8\u8b58\uff0c\u50c5\u6709 2%</td></tr><tr><td>(Arousal)\u82e5\u70ba 5 \u5247\u4ee3\u8868\u6c92\u6709\u7279\u5b9a\u50be\u5411\u7684\u4e2d\u6027\u60c5\u7dd2\u3002 \u9032\u884c\u8a13\u7df4\u3002 5e-5 \u6703\u88ab\u932f\u5206\u6210 Q3\uff0c\u53ea\u6709 2%\u6703\u56e0\u70ba\u559a\u9192(Arousal)\u88ab\u932f\u8aa4\u5206\u985e\u3002Q3 \u6709 16%\u6703\u88ab\u932f\u5206\u6210 Q2\uff0c 1.8867 12 \u5176\u9918\u7684\u90fd\u80fd\u88ab\u6b63\u78ba\u5206\u985e\uff0c\u4e5f\u5c31\u662f\u5728 Q3 \u7684\u60c5\u7dd2\u53ea\u6709 16%\u6703\u56e0\u70ba\u559a\u9192(Arousal)\u88ab\u932f\u8aa4\u5206\u985e\u3002</td></tr></table>", |
|
"num": null |
|
}, |
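
For reference, here is a minimal PyTorch/transformers sketch of the fine-tuning recipe described in the implementation details above (Dropout plus a linear head on a BERT encoder, Adam optimizer, MSE loss). The checkpoint name bert-base-chinese, the dropout rate, and the example batch are assumptions for illustration, not the paper's exact configuration:

```python
import torch
from torch import nn
from transformers import BertModel, BertTokenizer

class VARegressor(nn.Module):
    """BERT with a Dropout layer and a linear head, trained with MSE.

    n_outputs=2 is the multi-output architecture (valence and arousal
    predicted at once); n_outputs=1 is the single-output architecture.
    """
    def __init__(self, name="bert-base-chinese", n_outputs=2):
        super().__init__()
        self.bert = BertModel.from_pretrained(name)
        self.dropout = nn.Dropout(0.1)  # dropout rate is an assumption
        self.head = nn.Linear(self.bert.config.hidden_size, n_outputs)

    def forward(self, input_ids, attention_mask):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        return self.head(self.dropout(out.pooler_output))

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model = VARegressor()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)  # one of the paper's three learning rates
loss_fn = nn.MSELoss()

# One illustrative training step on a CVAP-style example phrase.
batch = tokenizer(["非常可愛"], return_tensors="pt", padding=True)
target = torch.tensor([[8.0, 7.313]])  # (valence, arousal) annotation
pred = model(batch["input_ids"], batch["attention_mask"])
loss = loss_fn(pred, target)
loss.backward()
optimizer.step()
```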
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td/><td colspan=\"3\">Prediction by CVAT: Transfer Learning</td><td/><td/></tr><tr><td/><td/><td>Q1</td><td>Q2</td><td>Q3</td><td>Q4</td></tr><tr><td/><td>Q1</td><td>23</td><td>8</td><td>1</td><td>11</td></tr><tr><td>True</td><td>Q2</td><td>0</td><td>44</td><td>1</td><td>0</td></tr><tr><td/><td>Q3</td><td>0</td><td>7</td><td>36</td><td>0</td></tr><tr><td/><td>Q4</td><td>1</td><td>2</td><td>19</td><td>17</td></tr><tr><td/><td colspan=\"3\">Prediction by CVAT: Training from Scratch</td><td/><td/></tr><tr><td/><td/><td>Q1</td><td>Q2</td><td>Q3</td><td>Q4</td></tr><tr><td/><td>Q1</td><td>11</td><td>6</td><td>11</td><td>15</td></tr><tr><td>True</td><td>Q2</td><td>0</td><td>28</td><td>17</td><td>0</td></tr><tr><td/><td>Q3</td><td>0</td><td>6</td><td>37</td><td>0</td></tr><tr><td/><td>Q4</td><td>0</td><td>3</td><td>27</td><td>9</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |