|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:38:26.405297Z" |
|
}, |
|
"title": "HinGE: A Dataset for Generation and Evaluation of Code-Mixed Hinglish Text", |
|
"authors": [ |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research Pune", |
|
"location": { |
|
"region": "Maharashtra", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mayank", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IIT Gandhinagar Gandhinagar", |
|
"location": { |
|
"region": "Gujarat", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Text generation is a highly active area of research in the computational linguistic community. The evaluation of the generated text is a challenging task and multiple theories and metrics have been proposed over the years. Unfortunately, text generation and evaluation are relatively understudied due to the scarcity of high-quality resources in code-mixed languages where the words and phrases from multiple languages are mixed in a single utterance of text and speech. To address this challenge, we present a corpus (HinGE) for a widely popular code-mixed language Hinglish (code-mixing of Hindi and English languages). HinGE has Hinglish sentences generated by humans as well as two rule-based algorithms corresponding to the parallel Hindi-English sentences. In addition, we demonstrate the inefficacy of widely-used evaluation metrics on the code-mixed data. The HinGE dataset will facilitate the progress of natural language generation research in code-mixed languages.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Text generation is a highly active area of research in the computational linguistic community. The evaluation of the generated text is a challenging task and multiple theories and metrics have been proposed over the years. Unfortunately, text generation and evaluation are relatively understudied due to the scarcity of high-quality resources in code-mixed languages where the words and phrases from multiple languages are mixed in a single utterance of text and speech. To address this challenge, we present a corpus (HinGE) for a widely popular code-mixed language Hinglish (code-mixing of Hindi and English languages). HinGE has Hinglish sentences generated by humans as well as two rule-based algorithms corresponding to the parallel Hindi-English sentences. In addition, we demonstrate the inefficacy of widely-used evaluation metrics on the code-mixed data. The HinGE dataset will facilitate the progress of natural language generation research in code-mixed languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Code-mixing is the mixing of two or more languages in a single utterance of speech or text. A commonly observed communication pattern for a multilingual speaker is to mix words and phrases from multiple languages. Code-mixing is widespread across various language pairs, such as Spanish-English, Hindi-English, and Bengali-English. Recently, we observe a boom in the availability of code-mixed data with the inflation of the social media platforms such as Twitter and Facebook.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In past, we witness magnitude of work to address standard code-mixing natural language understanding (NLU) tasks such as language identification (Shekhar et al., 2020; Singh et al., 2018a; Ramanarayanan et al., 2019) , POS tagging (Singh et al., 2018b; Vyas et al., 2014) , named entity recognition (Singh et al., 2018a) , and dependency pars-ing (Zhang et al., 2019a) along with sentence classification tasks like sentiment analysis (Patwa et al., 2020; Joshi et al., 2016) , stance detection (Utsav et al., 2020) , and sarcasm detection (Swami et al., 2018) . Unlike code-mixed NLU, natural language generation (NLG) of code-mixed text is highly understudied. Resource scarcity adds to the challenge of building efficient solutions for code-mixed NLG tasks. Evaluation of the code-mixed NLG tasks also lacks standalone resources, theories, and metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 167, |
|
"text": "(Shekhar et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 188, |
|
"text": "Singh et al., 2018a;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 216, |
|
"text": "Ramanarayanan et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 252, |
|
"text": "(Singh et al., 2018b;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 271, |
|
"text": "Vyas et al., 2014)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 320, |
|
"text": "(Singh et al., 2018a)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 368, |
|
"text": "(Zhang et al., 2019a)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 454, |
|
"text": "(Patwa et al., 2020;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 474, |
|
"text": "Joshi et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 514, |
|
"text": "(Utsav et al., 2020)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 559, |
|
"text": "(Swami et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, we observe a growing interest in the code-mixed text generation task. To generate the code-mixed data various techniques have been employed such as matrix frame language theory (Lee et al., 2019; Gupta et al., 2020; Jain et al., 2021) , equivalent constraint theory (Pratapa et al., 2018) , pointer-generator network (Winata et al., 2018 (Winata et al., , 2019 Gupta et al., 2020) , Generative Adversarial Networks (GANs) (Gao et al., 2019) , etc. The majority of the available datasets (Rijhwani et al., 2017; Solorio et al., 2014; Patro et al., 2017) employed in code-mixed NLG contains noisy codemixed text collected from social media platforms such as Twitter. These datasets also lack the sanity check for the quality of sentences, making the systems developed on these datasets vulnerable to real-world applicability. To address the challenge of scarcity of high-quality resources for the codemixed NLG tasks, we propose HinGE dataset 1 that will facilitate the community to build robust systems. The dataset contains sentences generated by humans as well as two rule-based algorithms. In Table 1 , we compare HinGE with three other baseline datasets that can be used in the Hinglish code-mixed text generation and evaluation task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 205, |
|
"text": "(Lee et al., 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 225, |
|
"text": "Gupta et al., 2020;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 244, |
|
"text": "Jain et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 298, |
|
"text": "(Pratapa et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 347, |
|
"text": "(Winata et al., 2018", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 370, |
|
"text": "(Winata et al., , 2019", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 390, |
|
"text": "Gupta et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 450, |
|
"text": "(Gao et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 520, |
|
"text": "(Rijhwani et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 542, |
|
"text": "Solorio et al., 2014;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 562, |
|
"text": "Patro et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1105, |
|
"end": 1112, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition to the code-mixed NLG, the evaluation of the generated code-mixed text is a challenging task. The widely popular metrics for monolin- Only ES Only ES Human-generated code-mixed sentences Multiple human-generated code-mixed sentences for a parallel sentence Machine-generated code-mixed sentences Multiple machine-generated code-mixed sentences for a parallel sentence Human ratings for the quality of generated code-mixed sentences gual languages fail to capture the linguistic diversity present in the code-mixed data, such as spelling variation and complex sentence structuring. The quality ratings of the sentences generated by the rule-based algorithms in HinGE dataset will help to develop the metrics and theories for evaluating the code-mixed NLG tasks. Our main contributions are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We create high-quality human-generated codemixed Hinglish sentences corresponding to the parallel Hindi-English sentences. Each pair of parallel sentences has at least two human-generated Hinglish sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 In addition to the human-generated codemixed sentences, we propose two rule-based algorithms to generate the Hinglish sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We demonstrate the inefficacy of five widely popular metrics for the NLG task with the code-mixed text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To develop efficient metrics for the codemixed NLG tasks, we provide the human ratings corresponding to the code-mixed sentences generated by the rule-based algorithms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Human-Generated Hinglish Text", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The scarcity of high-quality code-mixed datasets limits current research in various NLU tasks such as text generation and summarization. To address this challenge, we create a human-generated corpus of Hinglish sentences corresponding to parallel monolingual English and Hindi sentences. We use the IIT Bombay English-Hindi Parallel Corpus (hereafter 'IIT-B corpus') (Kunchukuttan et al., 2018) . The IIT-B corpus has 1,561,840 parallel sentence pairs in English and Hindi. The English sentences are written in the Roman script, and the Hindi sentences are written in the Devanagari script. We randomly select 5,000 sentence pairs, in which the number of tokens in both the sentences is more than five to create a human-generated parallel corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 394, |
|
"text": "(Kunchukuttan et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To create the gold-standard dataset, we employ five human annotators. Each annotator has expertlevel proficiency in writing, speaking, and understanding English and Hindi languages. The objective of the annotation is to generate at least two unique Hinglish code-mixed sentences corresponding to the parallel English and Hindi sentence pairs. The annotators can also generate more than two code-mixed sentences for each sentence pair. We shuffle, pre-process, and share the sentence pairs with the annotators to generate the corresponding Hinglish sentences. A single annotator annotates each sentence pair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We assign 1,000 unique sentence pairs to each annotator with the following annotation guidelines:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The Hinglish sentence should be written in Roman script.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The Hinglish sentence should have words from both the languages, i.e., English and Hindi.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Avoid using new words, wherever possible, that are not present in both the sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 If the source sentences are not the translation of each other, mark the sentence pair as \"#\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Post annotation, we remove the sentence pairs marked as \"#\" or are missing an annotation. Note that due to the complexity of generating codemixed sentences, such as the inability to iden-tify two unique Hinglish sentences, complex sentence structuring, and usage of difficult words in monolingual sentences, annotators do not provide two unique sentences Hinglish sentences for each monolingual sentence pair. We obtain 1,978 sentence pairs with two or more unique Hinglish sentences. On average, 2.5 code-mixed sentences are created for each Hindi-English sentence pair. Figure 1 shows an example of two code-mixed sentences generated by the annotator for a given sentence pair.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 572, |
|
"end": 580, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Qualitative evaluation of the human-generated Hinglish text: To qualitatively evaluate the generated sentences, we adapt the evaluation strategy described in (Srivastava and Singh, 2021) . We randomly sample 100 Hinglish sentences generated by humans along with the source parallel monolingual English and Hindi sentences. We employ two human annotators 2 for the qualitative evaluation. We ask the annotators to rate each Hinglish sentence on two metrics:", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 186, |
|
"text": "(Srivastava and Singh, 2021)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Degree of code-mixing (DCM): The codemixing index (CMI) proposed by (Das and Gamb\u00e4ck, 2014) measures the degree of codemixing in a text based on language tags of the participating tokens. The objective of DCM follows CMI with an exception of explicit token language identification to measure the degree of code-mixing in the text. The DCM score can vary between 0 to 10. A DCM score of 0 corresponds to the monolingual sentence with no code-mixing, whereas the DCM score of 10 suggests a high degree of code-mixing. \u2022 Readability (RA): RA score can vary between 0 to 10. A completely unreadable sentence due to many spelling mistakes, no sentence structuring, or meaning yields a RA score of 0. A RA score of 10 suggests a highly readable sentence with clear semantics and easy-to-read words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 93, |
|
"text": "(Das and Gamb\u00e4ck, 2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The average DCM scores by the two human annotators are 8.72 and 8.65. The average RA scores are 8.65 and 8.37. The high average scores demonstrate good quality code-mixed sentence generation. Table 2 shows example ratings provided by the two human annotators to the five Hinglish sentences. 3 Machine-Generated Hinglish Text", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 199, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition to the human-generated code-mixed text, we also generate the Hinglish sentence synthetically by following the Embedded-Matrix theory (Joshi, 1982) . We propose two rule-based Hinglish text generation systems leveraging the parallel monolingual English and Hindi sentences. In both systems, we use Hindi as the matrix language and English as the embedded language. The matrix language imparts structure to the code-mixed text with tokens embedded from embedded language. We also use several linguistic resources in both generation systems. These resources include:", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 158, |
|
"text": "(Joshi, 1982)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 English-Hindi Dictionary:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We curate 77,805 pairs of English words and the corresponding Hindi meanings from two sources 3, 4 to construct an English-Hindi dictionary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 96, |
|
"text": "3,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 98, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Cross-lingual Word Embedding: We leverage multilingual word vectors for English and Hindi tokens. These word-vectors (dim = 300) are generated from fastText's multilingual pre-trained model (Bojanowski et al., 2017) . We further map these vectors to a common space using VecMap (Artetxe et al., 2018 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 217, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 301, |
|
"text": "(Artetxe et al., 2018", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 GIZA++: GIZA++ (Och and Ney, 2003) learns the word alignment between the parallel sentences using an HMM based alignment model in an unsupervised manner. We train GIZA++ on IIT-B corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 36, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Script Transliteration: We transliterate the code-mixed sentences containing tokens in the Devanagari script to the Roman script (Mishra, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 145, |
|
"text": "(Mishra, 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hinglish sentences Human 1 Human 2 DCM RA DCM RA Media ke exposure ke aadhar par Indian rajyo ki rankings 10 10 8 8 You will be more likely to give up before the 30 minutes, aap logo se jyada he. 7 8 9 8 Shighra hi maansingh british ke saath saude baazi kar rha tha. 8 10 8 8 par there's another way, aur mai aapko bata kar ja rahi hun. 9 10 9 9 \"Aren't you a tiny bit andhvishavaasi?\" 9 9 9 9 Table 2 : Example DCM and RA ratings provided by the two human annotators to the human-generated Hinglish sentences. We color code the tokens in the Hinglish sentence based on the language with the scheme: English tokens with orange, Hindi tokens with blue and language independent tokens with black color.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 401, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 YAKE: We use YAKE (Campos et al., 2020) , an unsupervised automatic keyword extraction method, to extract the key-phrases from the monolingual English and Hindi sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 41, |
|
"text": "(Campos et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We further extend the English-Hindi dictionary by incorporating parallel sentences in the IIT-B corpus. We leverage VecMap's shared representation to identify the closest word in English and the corresponding Hindi sentence. In addition, we also leverage GIZA++ to align the parallel sentences resulting in aligned English-Hindi tokens. Both of these steps extend the initial dictionary from 77,809 to 1,52,821 words and meaning pairs. We use this extended dictionary in the following two code-mixed text generation systems:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Word-aligned code-mixing (WAC): Here, we align the noun and adjective tokens between the parallel sentences using the extended dictionary. We replace all the aligned Hindi tokens with the corresponding English noun or adjective token and transliterate the resultant Hindi sentence to the Roman script. Figure 2 demonstrates the example Hinglish text generated from the parallel monolingual English and Hindi sentences using the WAC procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 312, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Phrase-aligned code-mixing (PAC): Here, we align the keyphrases of length up to three tokens between the parallel sentences. To identify the keyphrases, we use the YAKE tool. We replace all the aligned Hindi phrases with the corresponding longest matching English phrase and transliterate the resultant Hindi sentence to the Roman script. Figure 3 demonstrates the example Hinglish text generated from the parallel monolingual English and Hindi sentences using the PAC procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It should be noted that the code-mixed sentences generated using the WAC and PAC procedures have added noise (spelling variations, grammatical inconsistencies, etc.) and are not an exact representation of the high-quality code-mixed text generated by humans. This noisy machine-generation of the code-mixed text is intentional such that the efficacy of the evaluation metrics could be studied (see Section 4) for the wide spectrum of sentences and should not be limited to good quality code-mixed sentences. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we evaluate machine-generated code-mixed text. This study demonstrates severe limitations of five widely popular NLP metrics in evaluating code-mixed text generation performance. We leverage the following metrics: (i) Bilingual Evaluation Understudy Score (BLEU, Papineni et al. (2002) ), (ii) NIST (Doddington, 2002), (iii) BERTScore (BS, Zhang et al. (2019b)), (iv) Word Error Rate (WER, Levenshtein (1966)), and (v) Translator Error Rate (TER, Snover et al. (2006) ). Higher BLEU, NIST, or BS values and lower WER or TER values represent better generation performance. We conduct two experiments to evaluate the machine-generated Hinglish text:", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 302, |
|
"text": "Papineni et al. (2002)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 484, |
|
"text": "(TER, Snover et al. (2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Human evaluation: First, we perform coarsegrained qualitative evaluation by randomly sampling 100 English-Hindi sentence pairs and corresponding WAC and PAC generated sentences. We employ two human evaluators 5 who are proficient in English and Hindi languages to evaluate the quality of the generated sentences. We ask evaluators to provide one of the two labels -Correct and Incorrect -to each of the generated sentences. A sentence is marked Correct if it is following the semantics of the parallel sentences and has high readability. Table 3 shows the annotator's agreement on the randomly sampled set of sentences. The coarse-grained qualitative evaluation shows the correct generation in at least 50% of the cases. Table 4 shows the evaluation of the randomly sam- 5 The evaluators are different from the annotators employed in Section 2). pled 100 sentences on five metrics. The results show the inefficacy of automatic evaluation metrics to capture the linguistic diversity of the generated code-mixed text due to spelling variations, omitted words, limited reference sentences, and informal writing style (Srivastava and Singh, 2020) . We further conduct a fine-grained qualitative human evaluation to measure the similarity between the machinegenerated Hinglish sentences and the monolingual pair, the readability, and the grammatical correctness. We employ a new set of eight human evaluators to provide a rating between 1 (low quality) to 10 (high quality) to the PAC and WAC generated Hinglish sentences based on the following three parameters:", |
|
"cite_spans": [ |
|
{ |
|
"start": 773, |
|
"end": 774, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1132, |
|
"end": 1144, |
|
"text": "Singh, 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 547, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 730, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "-The similarity between the generated Hinglish and the monolingual source sentences. -The readability of the generated sentence. -The grammatical correctness of the generated sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Each generated sentence is rated by two human evaluators. All the evaluators have expertlevel proficiency in the English and Hindi languages. Table 5 shows ratings provided to a representative machine-generated Hinglish sentence. Figure 4 shows the distribution of the fine-grained human evaluation scores. For both procedures, the majority (WAC: 82.3% and PAC: 75.2%) of the sentences score in the range 6-9. None of the sentences received a rating score of 1. The results further corroborate our claim that automatic evaluation metrics undermine code-mixed text generation performance. Figure 5 shows the distribution of the disagreement in the human evaluation of the generated sentences. We calculate the disagreement as to the absolute difference between the human evaluation scores. PAC-generated sentences are more prone to high disagreement (>=5) in the human evaluation than WAC. This could be attributed to the fact that PAC-generated sentences are relatively less constrained, which leaves the evaluation to the expertise and interpretation of the Hinglish language by the annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 149, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 238, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 596, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 the machine-generated code-mixed text. Table 6 shows the average scores for each of the metrics against the corresponding humanprovided rating. As evident from the results, the metrics perform poorly on the code-mixed data. We further analyze the correlation 6 of the metric scores with the human-provided ratings for both the text generation procedures.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 48, |
|
"text": "Table 6", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For this task, we divide the human ratings into three buckets: -Bucket 1: Human rating between 2-10.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "-Bucket 2: Human rating between 2-5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "-Bucket 3: Human rating between 6-10. Table 7 shows the results of the correlation between various metric scores and the human rating to the machine-generated code-mixed sentences using WAC and PAC procedures. Human rating for generated sentences in Bucket 3 is relatively highly correlated with the metric scores compared to Bucket 2. This behavior could be attributed to the fact that low-quality sentences are difficult to rate for the human annotators due to various reasons such as poor sentence structuring and many spelling mistakes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 45, |
|
"text": "Table 7", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Study on Evaluation of Code-Mixed Text Generation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this section, we present a discussion on the various inherent limitations associated with the proposed HinGE dataset. We also discuss the various opportunities for the computational linguistic community to build efficient systems and metrics for code-mixed languages. Some of the major limitations with the dataset are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Due to the high time and cost associated with the human annotations, the number of samples in the dataset is limited. Because of a similar reason, the other code-mixing datasets (Srivastava and Singh, 2021) suffer from the scarcity of large-scale human annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 208, |
|
"text": "(Srivastava and Singh, 2021)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The IIT-B parallel corpus does not contain sentences mined from the social media platforms. This potentially reduces the noise in the generated sentences compared to the dataset previously compiled from the social media platforms for various tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The annotators generating the code-mixed sentences were constrained only to include words from the parallel source sentences. This could potentially limit the observations compared to the real-world datasets collected from social media platforms that are more linguistically diverse due to the presence of a large number of multilingual speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Due to the high annotation cost and time, the WAC and PAC generated sentences are only rated on a single scale encompassing multiple dimensions such as grammatical correctness, readability, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Even with the presence of the above limitations, the HinGE dataset could be effectively used for various purposes such as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The dataset could be effectively used in developing code-mixing text generation systems. Currently, the dataset supports only one code-mixed language, i.e., Hinglish, but it could be extended using various techniques such as weak supervision and active learning.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The machine-generated sentences and the corresponding human ratings will be useful in designing metrics and systems for the effective evaluation of various code-mixed NLG tasks. It could also be used to investigate the factors influencing the quality of the code-mixed text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The dataset could also be used in investigating the reasoning behind the disagreement in the human scores to the machine-generated sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 The code-mixed text (human or machine-generated) could be useful in the multitude of other code-mixing tasks such as language identification and POS tagging.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 With the recent thrust in code-mixed machine translation, the HinGE dataset would be extremely useful in designing and evaluating the machine-translation systems. The multiple code-mixed sentences corresponding to a given pair of parallel monolingual sentences would help to build robust translation systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and Opportunities", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we present a high-quality dataset (HinGE) for the text-generation and evaluation task in the code-mixed Hinglish language. The code-mixed sentences in the HinGE dataset are generated by humans and rule-based algorithms. We demonstrate the poor evaluation capabilities of five widely popular metrics on the code-mixed data. Along with the human-generated sentences, the machine-generated sentences (as described in Section 3) and the human ratings of these code-mixed sentences could facilitate building the highly scalable and robust evaluation metrics and strategies for the code-mixed text. The multiple human-generated sentences corresponding to a pair of parallel monolingual sentences will pave the way in designing natural language generation systems robust to adversaries and linguistic diversities such as spelling variation and matrix language.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://sites.google.com/view/viveksrivastava/resources", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "different from the human annotators who generate the Hinglish sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.cfilt.iitb.ac.in/~hdict/ webinterface_user/index.php 4 https://jankaribook.com/verbs-listverb-in-hindi/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We experiment with Pearson Correlation Coefficient. The value ranges from -1 to 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "789--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018. A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 789-798.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A dataset for building code-mixed goal oriented conversation systems", |
|
"authors": [ |
|
{ |
|
"first": "Suman", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Moghe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddhartha", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitesh M", |
|
"middle": [], |
|
"last": "Khapra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3766--3780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suman Banerjee, Nikita Moghe, Siddhartha Arora, and Mitesh M Khapra. 2018. A dataset for building code-mixed goal oriented conversation systems. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3766-3780.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Yake! keyword extraction from single documents using multiple local features", |
|
"authors": [ |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Campos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00edtor", |
|
"middle": [], |
|
"last": "Mangaravite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arian", |
|
"middle": [], |
|
"last": "Pasquali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Al\u00edpio", |
|
"middle": [], |
|
"last": "Jorge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C\u00e9lia", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Jatowt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Information Sciences", |
|
"volume": "509", |
|
"issue": "", |
|
"pages": "257--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ricardo Campos, V\u00edtor Mangaravite, Arian Pasquali, Al\u00edpio Jorge, C\u00e9lia Nunes, and Adam Jatowt. 2020. Yake! keyword extraction from single documents using multiple local features. Information Sciences, 509:257-289.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Identifying languages at the word level in code-mixed indian social media text", |
|
"authors": [ |
|
{ |
|
"first": "Amitava", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Gamb\u00e4ck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 11th International Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "378--387", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amitava Das and Bj\u00f6rn Gamb\u00e4ck. 2014. Identifying languages at the word level in code-mixed indian so- cial media text. In Proceedings of the 11th Interna- tional Conference on Natural Language Processing, pages 378-387.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Automatic evaluation of machine translation quality using n-gram cooccurrence statistics", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Doddington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the second international conference on Human Language Technology Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--145", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Doddington. 2002. Automatic evaluation of machine translation quality using n-gram co- occurrence statistics. In Proceedings of the second international conference on Human Language Tech- nology Research, pages 138-145.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Code-switching sentence generation by bert and generative adversarial networks", |
|
"authors": [ |
|
{ |
|
"first": "Yingying", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junlan", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leijing", |
|
"middle": [], |
|
"last": "Hou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 20th Annual Conference of the International Speech Communication Association, INTERSPEECH 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3525--3529", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yingying Gao, Junlan Feng, Ying Liu, Leijing Hou, Xin Pan, and Yong Ma. 2019. Code-switching sen- tence generation by bert and generative adversarial networks. In Proceedings of the 20th Annual Confer- ence of the International Speech Communication As- sociation, INTERSPEECH 2019, pages 3525-3529.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A semi-supervised approach to generate the code-mixed text using pre-trained encoder and transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2267--2280", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.206" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Gupta, Asif Ekbal, and Pushpak Bhattacharyya. 2020. A semi-supervised approach to generate the code-mixed text using pre-trained encoder and trans- fer learning. In Findings of the Association for Com- putational Linguistics: EMNLP 2020, pages 2267- 2280, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Codeswitched sentence creation using dependency parsing", |
|
"authors": [ |
|
{

"first": "Dhruval",

"middle": [],

"last": "Jain",

"suffix": ""

},

{

"first": "Arun",

"middle": [

"D"

],

"last": "Prabhu",

"suffix": ""

},

{

"first": "Shubham",

"middle": [],

"last": "Vatsal",

"suffix": ""

},

{

"first": "Gopi",

"middle": [],

"last": "Ramena",

"suffix": ""

},

{

"first": "Naresh",

"middle": [],

"last": "Purre",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "2021 IEEE 15th International Conference on Semantic Computing (ICSC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "124--129", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICSC50631.2021.00030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dhruval Jain, Arun D Prabhu, Shubham Vatsal, Gopi Ramena, and Naresh Purre. 2021. Codeswitched sentence creation using dependency parsing. In 2021 IEEE 15th International Conference on Seman- tic Computing (ICSC), pages 124-129, Los Alami- tos, CA, USA. IEEE Computer Society.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Towards sub-word level compositions for sentiment analysis of hindi-english code mixed text", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ameya", |
|
"middle": [], |
|
"last": "Prabhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasudeva", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2482--2491", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Ameya Prabhu, Manish Shrivastava, and Vasudeva Varma. 2016. Towards sub-word level compositions for sentiment analysis of hindi-english code mixed text. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 2482-2491.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Processing of sentences with intra-sentential code-switching", |
|
"authors": [ |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1982, |
|
"venue": "Coling 1982: Proceedings of the Ninth International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aravind Joshi. 1982. Processing of sentences with intra-sentential code-switching. In Coling 1982: Proceedings of the Ninth International Conference on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The iit bombay english-hindi parallel corpus", |
|
"authors": [ |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anoop Kunchukuttan, Pratik Mehta, and Pushpak Bhat- tacharyya. 2018. The iit bombay english-hindi par- allel corpus. In Proceedings of the Eleventh Interna- tional Conference on Language Resources and Eval- uation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Linguistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Grandee", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xianghu", |
|
"middle": [], |
|
"last": "Yue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. Interspeech 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3730--3734", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2019-1382" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grandee Lee, Xianghu Yue, and Haizhou Li. 2019. Lin- guistically Motivated Parallel Data Augmentation for Code-Switch Language Modeling. In Proc. In- terspeech 2019, pages 3730-3734.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Binary codes capable of correcting deletions, insertions, and reversals", |
|
"authors": [ |
|
{

"first": "Vladimir",

"middle": [

"I"

],

"last": "Levenshtein",

"suffix": ""

}
|
], |
|
"year": 1966, |
|
"venue": "Soviet physics doklady", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "707--710", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir I Levenshtein. 1966. Binary codes capable of correcting deletions, insertions, and reversals. In Soviet physics doklady, volume 10, pages 707-710.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "devanagari-to-roman-scripttransliteration", |
|
"authors": [ |
|
{ |
|
"first": "Ritwik", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ritwik Mishra. 2019. devanagari-to-roman-script- transliteration. https://github.com/ ritwikmishra/devanagari-to-roman- script-transliteration.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{

"first": "Franz",

"middle": [

"Josef"

],

"last": "Och",

"suffix": ""

},

{

"first": "Hermann",

"middle": [],

"last": "Ney",

"suffix": ""

}
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A systematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, pages 311-318. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "All that is English may be Hindi: Enhancing language identification through automatic ranking of the likeliness of word borrowing in social media", |
|
"authors": [ |
|
{

"first": "Jasabanta",

"middle": [],

"last": "Patro",

"suffix": ""

},

{

"first": "Bidisha",

"middle": [],

"last": "Samanta",

"suffix": ""

},

{

"first": "Saurabh",

"middle": [],

"last": "Singh",

"suffix": ""

},

{

"first": "Abhipsa",

"middle": [],

"last": "Basu",

"suffix": ""

},

{

"first": "Prithwish",

"middle": [],

"last": "Mukherjee",

"suffix": ""

},

{

"first": "Monojit",

"middle": [],

"last": "Choudhury",

"suffix": ""

},

{

"first": "Animesh",

"middle": [],

"last": "Mukherjee",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2264--2274", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1240" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jasabanta Patro, Bidisha Samanta, Saurabh Singh, Ab- hipsa Basu, Prithwish Mukherjee, Monojit Choud- hury, and Animesh Mukherjee. 2017. All that is English may be Hindi: Enhancing language identi- fication through automatic ranking of the likeliness of word borrowing in social media. In Proceed- ings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2264-2274, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Semeval-2020 task 9: Overview of sentiment analysis of code-mixed tweets", |
|
"authors": [ |
|
{ |
|
"first": "Parth", |
|
"middle": [], |
|
"last": "Patwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Aguilar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudipta", |
|
"middle": [], |
|
"last": "Kar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suraj", |
|
"middle": [], |
|
"last": "Pandey", |
|
"suffix": "" |
|
}, |
|
{

"first": "PYKL",

"middle": [],

"last": "Srinivas",

"suffix": ""

},

{

"first": "Bj\u00f6rn",

"middle": [],

"last": "Gamb\u00e4ck",

"suffix": ""

},

{

"first": "Tanmoy",

"middle": [],

"last": "Chakraborty",

"suffix": ""

},

{

"first": "Thamar",

"middle": [],

"last": "Solorio",

"suffix": ""

},

{

"first": "Amitava",

"middle": [],

"last": "Das",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "774--790", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parth Patwa, Gustavo Aguilar, Sudipta Kar, Suraj Pandey, PYKL Srinivas, Bj\u00f6rn Gamb\u00e4ck, Tanmoy Chakraborty, Thamar Solorio, and Amitava Das. 2020. Semeval-2020 task 9: Overview of sentiment analysis of code-mixed tweets. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 774-790.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Language modeling for code-mixing: The role of linguistic theory based synthetic data", |
|
"authors": [ |
|
{ |
|
"first": "Adithya", |
|
"middle": [], |
|
"last": "Pratapa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gayatri", |
|
"middle": [], |
|
"last": "Bhat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monojit", |
|
"middle": [], |
|
"last": "Choudhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunayana", |
|
"middle": [], |
|
"last": "Sitaram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandipan", |
|
"middle": [], |
|
"last": "Dandapat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalika", |
|
"middle": [], |
|
"last": "Bali", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1543--1553", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1143" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adithya Pratapa, Gayatri Bhat, Monojit Choudhury, Sunayana Sitaram, Sandipan Dandapat, and Kalika Bali. 2018. Language modeling for code-mixing: The role of linguistic theory based synthetic data. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1543-1553, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Automatic turn-level language identification for code-switched spanish-english dialog", |
|
"authors": [ |
|
{

"first": "Vikram",

"middle": [],

"last": "Ramanarayanan",

"suffix": ""

},

{

"first": "Robert",

"middle": [],

"last": "Pugh",

"suffix": ""

},

{

"first": "Yao",

"middle": [],

"last": "Qian",

"suffix": ""

},

{

"first": "David",

"middle": [],

"last": "Suendermann-Oeft",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "9th International Workshop on Spoken Dialogue System Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikram Ramanarayanan, Robert Pugh, Yao Qian, and David Suendermann-Oeft. 2019. Automatic turn-level language identification for code-switched spanish-english dialog. In 9th International Work- shop on Spoken Dialogue System Technology, pages 51-61. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Estimating code-switching on Twitter with a novel generalized word-level language detection technique", |
|
"authors": [ |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Rijhwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Royal", |
|
"middle": [], |
|
"last": "Sequiera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monojit", |
|
"middle": [], |
|
"last": "Choudhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalika", |
|
"middle": [], |
|
"last": "Bali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra Shekhar", |
|
"middle": [], |
|
"last": "Maddila", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1971--1982", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1180" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shruti Rijhwani, Royal Sequiera, Monojit Choud- hury, Kalika Bali, and Chandra Shekhar Maddila. 2017. Estimating code-switching on Twitter with a novel generalized word-level language detection technique. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1971-1982, Van- couver, Canada. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Language identification framework in code-mixed social media text based on quantum lstm-the word belongs to which language?",

"authors": [

{

"first": "Shashi",

"middle": [],

"last": "Shekhar",

"suffix": ""

},

{

"first": "Dilip",

"middle": [

"Kumar"

],

"last": "Sharma",

"suffix": ""

},

{

"first": "MM",

"middle": [

"Sufyan"

],

"last": "Beg",

"suffix": ""

}

],

"year": 2020,

"venue": "Modern Physics Letters B",
|
"volume": "34", |
|
"issue": "06", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Shekhar, Dilip Kumar Sharma, and MM Su- fyan Beg. 2020. Language identification framework in code-mixed social media text based on quantum lstm-the word belongs to which language? Mod- ern Physics Letters B, 34(06):2050086.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Language identification and named entity recognition in hinglish code mixed tweets", |
|
"authors": [ |
|
{ |
|
"first": "Kushagra", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Indira", |
|
"middle": [], |
|
"last": "Sen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ponnurangam", |
|
"middle": [], |
|
"last": "Kumaraguru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of ACL 2018, Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kushagra Singh, Indira Sen, and Ponnurangam Ku- maraguru. 2018a. Language identification and named entity recognition in hinglish code mixed tweets. In Proceedings of ACL 2018, Student Re- search Workshop, pages 52-58.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A twitter corpus for hindi-english code mixed pos tagging", |
|
"authors": [ |
|
{ |
|
"first": "Kushagra", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Indira", |
|
"middle": [], |
|
"last": "Sen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ponnurangam", |
|
"middle": [], |
|
"last": "Kumaraguru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Sixth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kushagra Singh, Indira Sen, and Ponnurangam Ku- maraguru. 2018b. A twitter corpus for hindi-english code mixed pos tagging. In Proceedings of the Sixth International Workshop on Natural Language Pro- cessing for Social Media, pages 12-17.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A study of translation edit rate with targeted human annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of association for machine translation in the Americas", |
|
"volume": "200", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A study of translation edit rate with targeted human annotation. In Proceedings of association for machine transla- tion in the Americas, volume 200. Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Overview for the first shared task on language identification in code-switched data", |
|
"authors": [ |
|
{ |
|
"first": "Thamar", |
|
"middle": [], |
|
"last": "Solorio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Blair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suraj", |
|
"middle": [], |
|
"last": "Maharjan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Ghoneim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelati", |
|
"middle": [], |
|
"last": "Hawwari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahad", |
|
"middle": [], |
|
"last": "Alghamdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alison", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the First Workshop on Computational Approaches to Code Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/W14-3907" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thamar Solorio, Elizabeth Blair, Suraj Mahar- jan, Steven Bethard, Mona Diab, Mahmoud Ghoneim, Abdelati Hawwari, Fahad AlGhamdi, Ju- lia Hirschberg, Alison Chang, and Pascale Fung. 2014. Overview for the first shared task on language identification in code-switched data. In Proceedings of the First Workshop on Computational Approaches to Code Switching, pages 62-72, Doha, Qatar. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Phinc: a parallel hinglish social media code-mixed corpus for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mayank", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.09447" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vivek Srivastava and Mayank Singh. 2020. Phinc: a parallel hinglish social media code-mixed cor- pus for machine translation. arXiv preprint arXiv:2004.09447.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Challenges and limitations with the metrics measuring the complexity of code-mixed text", |
|
"authors": [ |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mayank", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Fifth Workshop on Computational Approaches to Linguistic Code-Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vivek Srivastava and Mayank Singh. 2021. Challenges and limitations with the metrics measuring the com- plexity of code-mixed text. In Proceedings of the Fifth Workshop on Computational Approaches to Linguistic Code-Switching, pages 6-14.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A corpus of english-hindi code-mixed tweets for sarcasm detection", |
|
"authors": [ |
|
{ |
|
"first": "Sahil", |
|
"middle": [], |
|
"last": "Swami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankush", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinay", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Syed", |
|
"middle": [ |
|
"Sarfaraz" |
|
], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.11869" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sahil Swami, Ankush Khandelwal, Vinay Singh, Syed Sarfaraz Akhtar, and Manish Shrivastava. 2018. A corpus of english-hindi code-mixed tweets for sar- casm detection. arXiv preprint arXiv:1805.11869.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Stance detection in hindi-english code-mixed data", |
|
"authors": [ |
|
{ |
|
"first": "Jethva", |
|
"middle": [], |
|
"last": "Utsav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhaiwat", |
|
"middle": [], |
|
"last": "Kabaria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ribhu", |
|
"middle": [], |
|
"last": "Vajpeyi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Mina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 7th ACM IKDD CoDS and 25th COMAD", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "359--360", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3371158.3371226" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jethva Utsav, Dhaiwat Kabaria, Ribhu Vajpeyi, Mohit Mina, and Vivek Srivastava. 2020. Stance detection in hindi-english code-mixed data. In Proceedings of the 7th ACM IKDD CoDS and 25th COMAD, CoDS COMAD 2020, page 359-360, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Pos tagging of english-hindi code-mixed social media content", |
|
"authors": [ |
|
{ |
|
"first": "Yogarshi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spandana", |
|
"middle": [], |
|
"last": "Gella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jatin", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalika", |
|
"middle": [], |
|
"last": "Bali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monojit", |
|
"middle": [], |
|
"last": "Choudhury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "974--979", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yogarshi Vyas, Spandana Gella, Jatin Sharma, Kalika Bali, and Monojit Choudhury. 2014. Pos tagging of english-hindi code-mixed social media content. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 974-979.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learn to Code-Switch: Data Augmentation using Copy Mechanism on Language Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Genta Indra", |
|
"middle": [], |
|
"last": "Winata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Madotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chien-Sheng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Genta Indra Winata, Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2018. Learn to Code-Switch: Data Augmentation using Copy Mechanism on Lan- guage Modeling. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Code-switched language models using neural based synthetic data from parallel sentences", |
|
"authors": [ |
|
{ |
|
"first": "Genta Indra", |
|
"middle": [], |
|
"last": "Winata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Madotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chien-Sheng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "271--280", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K19-1026" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Genta Indra Winata, Andrea Madotto, Chien-Sheng Wu, and Pascale Fung. 2019. Code-switched lan- guage models using neural based synthetic data from parallel sentences. In Proceedings of the 23rd Con- ference on Computational Natural Language Learn- ing (CoNLL), pages 271-280, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Cross-lingual dependency parsing using code-mixed treebank", |
|
"authors": [ |
|
{ |
|
"first": "Meishan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guohong", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "996--1005", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meishan Zhang, Yue Zhang, and Guohong Fu. 2019a. Cross-lingual dependency parsing using code-mixed treebank. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 996-1005.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.09675" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019b. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Dataset characteristicsBanerjee et al.(2018)Srivastava and Singh (2020) Gupta et al. (2020) HinGE Parallel source sentences", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Example of the code-mixed sentences generated by the annotator for an English-Hindi sentence pair.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"text": "An example Hinglish code-mixed sentence generated using WAC method.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"num": null, |
|
"text": "An example Hinglish code-mixed sentence generated using PAC method.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Distribution of human evaluation of the generated Hinglish sentences using WAC and PAC.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Distribution of the disagreement between human evaluation of the generated Hinglish sentences using WAC and PAC.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Comparison between the various datasets available for the Hinglish NLG tasks. UEU: Unique English Utterance, UHU: Unique Hinglish Utterance, ES: English Sentences, PS: Parallel Sentences, HGHS: Human-Generated Hinglish Sentences, MGHS: Machine-Generated Hinglish Sentences.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"text": "Metric-based evaluation: Next, we analyze the performance of five evaluation metrics on", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td>WAC</td><td/><td>PAC</td><td/></tr><tr><td/><td colspan=\"2\">Agree Disagree</td><td colspan=\"2\">Agree Disagree</td></tr><tr><td>Correct Incorrect</td><td>56 22</td><td>22</td><td>55 5</td><td>40</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Annotator agreement on the randomly sampled sentences for WAC and PAC.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td>BLEU</td><td>WER</td><td>TER</td><td>NIST</td><td>BS</td></tr><tr><td colspan=\"6\">WAC 0.1229 0.8240 0.7830 2.2045 0.857</td></tr><tr><td>PAC</td><td colspan=\"5\">0.1202 0.8228 0.7981 2.0497 0.857</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "Automatic performance evaluation of the WAC and PAC procedures on the randomly sampled sentences.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "Example human-generated and machine-generated Hinglish sentences from the dataset along with the source English and Hindi sentences. Two different human annotators rate the synthetic Hinglish sentences on the scale 1-10 (low-high quality", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Human</td><td>WAC</td><td/><td>PAC</td><td/></tr><tr><td>Score</td><td>BLEU WER TER NIST</td><td>BS</td><td>BLEU WER TER NIST</td><td>BS</td></tr><tr><td>2</td><td colspan=\"4\">0.144 0.741 0.667 0.092 0.851 0.126 0.672 0.698 0.176 0.8603</td></tr><tr><td>3</td><td colspan=\"4\">0.138 0.735 0.708 0.070 0.852 0.146 0.765 0.696 0.086 0.851</td></tr><tr><td>4</td><td colspan=\"4\">0.133 0.695 0.666 0.103 0.849 0.143 0.744 0.703 0.100 0.8464</td></tr><tr><td>5</td><td colspan=\"4\">0.135 0.711 0.681 0.110 0.853 0.153 0.726 0.680 0.114 0.8515</td></tr><tr><td>6</td><td colspan=\"4\">0.141 0.697 0.670 0.102 0.852 0.164 0.689 0.646 0.124 0.8558</td></tr><tr><td>7</td><td colspan=\"4\">0.161 0.663 0.630 0.111 0.856 0.176 0.661 0.618 0.121 0.8581</td></tr><tr><td>8</td><td colspan=\"4\">0.177 0.621 0.589 0.127 0.859 0.177 0.639 0.605 0.128 0.8598</td></tr><tr><td>9</td><td colspan=\"4\">0.212 0.571 0.538 0.150 0.865 0.184 0.614 0.590 0.129 0.8638</td></tr><tr><td>10</td><td colspan=\"4\">0.291 0.509 0.493 0.157 0.878 0.242 0.551 0.543 0.146 0.8731</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "Comparison of various metric scores with the human score for WAC and PAC.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"text": "Comparison of correlation between evaluation metrics and human scores for WAC and PAC.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |