|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:31:20.881138Z" |
|
}, |
|
"title": "SIGMORPHON 2020 Shared Task 0: Typologically Diverse Morphological Inflection", |
|
"authors": [ |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Salesky", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sabrina", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mielke", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shijie", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [ |
|
"Edoardo" |
|
], |
|
"last": "Ponti", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{

"first": "Rowan",

"middle": [

"Hall"

],

"last": "Maudslay",

"suffix": "",

"affiliation": {},

"email": ""

},
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Ran", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Valvoda", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Toldova", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Tyers", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Klyachko", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Yegorov", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Krizhanovsky", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Czarnowska", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Nikkarinen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Krizhanovsky", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tiago", |
|
"middle": [], |
|
"last": "Pimentel", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{

"first": "Lucas",

"middle": [

"Torroba"

],

"last": "Hennigen",

"suffix": "",

"affiliation": {},

"email": ""

},
|
{ |
|
"first": "Christo", |
|
"middle": [], |
|
"last": "Kirov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Google AI \u00e1 University of British Columbia F", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Garrett", |
|
"middle": [], |
|
"last": "Nicolai", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University I Indiana University L University of Louisville", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hilaria", |
|
"middle": [], |
|
"last": "Cruz", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Eleanor", |
|
"middle": [], |
|
"last": "Chodroff", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of York D ETH Z\u00fcrich X University of Colorado", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Miikka", |
|
"middle": [], |
|
"last": "Silfverberg", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mans", |
|
"middle": [], |
|
"last": "Hulden", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "A broad goal in natural language processing (NLP) is to develop a system that has the capacity to process any natural language. Most systems, however, are developed using data from just one language such as English. The SIG-MORPHON 2020 shared task on morphological reinflection aims to investigate systems' ability to generalize across typologically distinct languages, many of which are low resource. Systems were developed using data from 45 languages and just 5 language families, fine-tuned with data from an additional 45 languages and 10 language families (13 in total), and evaluated on all 90 languages. A total of 22 systems (19 neural) from 10 teams were submitted to the task. All four winning systems were neural (two monolingual transformers and two massively multilingual RNNbased models with gated attention). Most teams demonstrate utility of data hallucination and augmentation, ensembles, and multilingual training for low-resource languages. Nonneural learners and manually designed grammars showed competitive and even superior performance on some languages (such as Ingrian, Tajik, Tagalog, Zarma, Lingala), especially with very limited data. Some language families (Afro-Asiatic, Niger-Congo, Turkic) were relatively easy for most systems and achieved over 90% mean accuracy while others were more challenging.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "A broad goal in natural language processing (NLP) is to develop a system that has the capacity to process any natural language. Most systems, however, are developed using data from just one language such as English. The SIG-MORPHON 2020 shared task on morphological reinflection aims to investigate systems' ability to generalize across typologically distinct languages, many of which are low resource. Systems were developed using data from 45 languages and just 5 language families, fine-tuned with data from an additional 45 languages and 10 language families (13 in total), and evaluated on all 90 languages. A total of 22 systems (19 neural) from 10 teams were submitted to the task. All four winning systems were neural (two monolingual transformers and two massively multilingual RNNbased models with gated attention). Most teams demonstrate utility of data hallucination and augmentation, ensembles, and multilingual training for low-resource languages. Nonneural learners and manually designed grammars showed competitive and even superior performance on some languages (such as Ingrian, Tajik, Tagalog, Zarma, Lingala), especially with very limited data. Some language families (Afro-Asiatic, Niger-Congo, Turkic) were relatively easy for most systems and achieved over 90% mean accuracy while others were more challenging.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Human language is marked by considerable diversity around the world. Though the world's languages share many basic attributes (e.g., Swadesh, 1950 and more recently, List et al., 2016) , grammatical features, and even abstract implications (proposed in Greenberg, 1963) , each language nevertheless has a unique evolutionary trajectory that is affected by geographic, social, cultural, and other factors. As a result, the surface form of languages varies substantially. The morphology of languages can differ in many ways: Some exhibit rich grammatical case systems (e.g., 12 in Erzya and 24 in Veps) and mark possessiveness, others might have complex verbal morphology (e.g., Oto-Manguean languages; Palancar and L\u00e9onard, 2016) or even \"decline\" nouns for tense (e.g., Tupi-Guarani languages). Linguistic typology is the discipline that studies these variations by means of a systematic comparison of languages (Croft, 2002; Comrie, 1989) . Typologists have defined several dimensions of morphological variation to classify and quantify the degree of crosslinguistic variation. This comparison can be challenging as the categories are based on studies of known languages and are progressively refined with documentation of new languages (Haspelmath, 2007) . Nevertheless, to understand the potential range of morphological variation, we take a closer look at three dimensions here: fusion, inflectional synthesis, and position of case affixes (Dryer and Haspelmath, 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 146, |
|
"text": "Swadesh, 1950", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 184, |
|
"text": "List et al., 2016)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 269, |
|
"text": "Greenberg, 1963)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 912, |
|
"end": 925, |
|
"text": "(Croft, 2002;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 926, |
|
"end": 939, |
|
"text": "Comrie, 1989)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1238, |
|
"end": 1256, |
|
"text": "(Haspelmath, 2007)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1444, |
|
"end": 1472, |
|
"text": "(Dryer and Haspelmath, 2013)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Fusion, our first dimension of variation, refers to the degree to which morphemes bind to one another in a phonological word (Bickel and Nichols, 2013b) . Languages range from strictly isolating (i.e., each morpheme is its own phonological word) to concatenative (i.e., morphemes bind together within a phonological word); nonlinearities such as ablaut or tonal morphology can also be present. From a geographic perspective, isolating languages are found in the Sahel Belt in West Africa, Southeast Asia and the Pacific. Ablaut-concatenative morphology and tonal morphology can be found in African languages. Tonal-concatenative morphology can be found in Mesoamerican languages (e.g., Oto-Manguean) . Concatenative morphology is the most common system and can be found around the world. Inflectional synthesis, the second dimension considered, refers to whether grammatical categories like tense, voice or agreement are expressed as affixes (synthetic) or individual words (analytic) (Bickel and Nichols, 2013c) . Analytic expressions are common in Eurasia (except the Pacific Rim, and the Himalaya and Caucasus mountain ranges), whereas synthetic expressions are used to a high degree in the Americas. Finally, affixes can variably surface as prefixes, suffixes, infixes, or circumfixes (Dryer, 2013) . Most Eurasian and Australian languages strongly favor suffixation, and the same holds true, but to a lesser extent, for South American and New Guinean languages (Dryer, 2013) . In Mesoamerican languages and African languages spoken below the Sahara, prefixation is dominant instead.", |
|
"cite_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 152, |
|
"text": "(Bickel and Nichols, 2013b)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 699, |
|
"text": "Oto-Manguean)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 985, |
|
"end": 1012, |
|
"text": "(Bickel and Nichols, 2013c)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1289, |
|
"end": 1302, |
|
"text": "(Dryer, 2013)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1466, |
|
"end": 1479, |
|
"text": "(Dryer, 2013)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "These are just three dimensions of variation in morphology, and the cross-linguistic variation is already considerable. Such cross-lingual variation makes the development of natural language processing (NLP) applications challenging. As Bender (2009 Bender ( , 2016 notes, many current architectures and training and tuning algorithms still present language-specific biases. The most commonly used language for developing NLP applications is English. Along the above dimensions, English is productively concatenative, a mixture of analytic and synthetic, and largely suffixing in its inflectional morphology. With respect to languages that exhibit inflectional morphology, English is relatively impoverished. 1 Importantly, English is just one morphological system among many. A larger goal of natural language processing is that the system work for any presented language. If an NLP system is trained on just one language, it could be missing important flexibility in its ability to account for cross-linguistic morphological variation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 249, |
|
"text": "Bender (2009", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 265, |
|
"text": "Bender ( , 2016", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this year's iteration of the SIGMORPHON shared task on morphological reinflection, we specifically focus on typological diversity and aim to investigate systems' ability to generalize across typologically distinct languages many of which are low-resource. For example, if a neural network architecture works well for a sample of Indo-European languages, should the same architecture also work well for Tupi-Guarani languages (where nouns are \"declined\" for tense) or Austronesian languages (where verbal morphology is frequently prefixing)?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The 2020 iteration of our task is similar to CoNLL-SIGMORPHON 2017 (Cotterell et al., 2017) and 2018 (Cotterell et al., 2018) in that participants are required to design a model that learns to generate inflected forms from a lemma and a set of morphosyntactic features that derive the desired target form. For each language we provide a separate training, development, and test set. More historically, all of these tasks resemble the classic \"wug\"-test that Berko (1958) developed to test child and human knowledge of English nominal morphology.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 91, |
|
"text": "(Cotterell et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 125, |
|
"text": "(Cotterell et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 470, |
|
"text": "Berko (1958)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unlike the task from earlier years, this year's task proceeds in three phases: a Development Phase, a Generalization Phase, and an Evaluation Phase, in which each phase introduces previously unseen data. The task starts with the Development Phase, which was an elongated period of time (about two months), during which participants develop a model of morphological inflection. In this phase, we provide training and development splits for 45 languages representing the Austronesian, Niger-Congo, Oto-Manguean, Uralic and Indo-European language families. Table 1 provides details on the languages. The Generalization Phase is a short period of time (it started about a week before the Evaluation Phase) during which participants fine-tune their models on new data. At the start of the phase, we provide training and development splits for 45 new languages where approximately half are genetically related (belong to the same family) and half are genetically unrelated (are isolates or belong to a different family) to the languages presented in the Development Phase. More specifically, we introduce (surprise) languages from Afro-Asiatic, Algic, Dravidian, Indo-European, Niger-Congo, Sino-Tibetan, Siouan, Songhay, Southern Daly, Tungusic, Turkic, Uralic, and Uto-Aztecan families. See Table 2 for more details.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 554, |
|
"end": 561, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1287, |
|
"end": 1294, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Finally, test splits for all 90 languages are released in the Evaluation Phase. During this phase, the models are evaluated on held-out forms. Importantly, the languages from both previous phases are evaluated simultaneously. This way, we evaluate the extent to which models (especially those with shared parameters) overfit to the development data: a model based on the morphological patterning of the Indo-European languages may end up with a bias towards suffixing and will struggle to learn prefixing or infixation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the 2020 shared task we cover 15 language families: Afro-Asiatic, Algic, Austronesian, Dravidian, Indo-European, Niger-Congo, Oto-Manguean, Sino-Tibetan, Siouan, Songhay, Southern Daly, Tungusic, Turkic, Uralic, and Uto-Aztecan. 2 Five language families were used for the Development phase while ten were held out for the Generalization phase. Tab. 1 and Tab. 2 provide information on the languages, their families, and sources of data. In the following section, we provide an overview of each language family's morphological system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Meet our Languages", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Afro-Asiatic language family, consisting of six branches and over 300 languages, is among the largest language families in the world. It is mainly spoken in Northern, Western and Central Africa as well as West Asia and spans large modern languages such as Arabic, in addition to ancient languages like Biblical Hebrew. Similarly, some of its languages have a long tradition of written form, while others have yet to incorporate a writing system. The six branches differ most notably in typology and syntax, with the Chadic language being the main source of differences, which has sparked discussion of the division of the family (Frajzyngier, 2018) . For example, in the Egyptian and Semitic branches, the root of a verb may not contain vowels, while this is allowed in Chadic. Although only four of the six branches, excluding Chadic and Omotic, use a prefix and suffix in conjugation when adding a subject to a verb, it is con-sidered an important characteristic of the family. In addition, some of the families in the phylum use tone to encode tense, modality and number among others. However, all branches use objective and passive suffixes. Markers of tense are generally simple, whereas aspect is typically distinguished with more elaborate systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 633, |
|
"end": 652, |
|
"text": "(Frajzyngier, 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Afro-Asiatic", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The Algic family embraces languages native to North America-more specifically the United States and Canada-and contain three branches. Of these, our sample contains Cree, the language from the largest genus, Algonquian, from which most languages are now extinct. The Algonquian genus is characterized by its concatenative morphology. Cree morphology is also concatenative and suffixing. It distinguishes between impersonal and non-impersonal verbs and presents four apparent declension classes among non-impersonal verbs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algic", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The Austronesian family of languages is largely comprised of languages from the Greater Central Philippine and Oceanic regions. They are characterized by limited morphology, mostly prefixing in nature. Additionally, tense-aspect affixes are predominantly seen as prefixes, though some suffixes are used. In the general case, verbs do not mark number, person, or gender. In M\u0101ori, verbs may be suffixed with a marker indicating the passive voice. This marker takes the form of one of twelve endings. These endings are difficult to predict as the language has undergone a loss of word-final consonants and there is no clear link between a stem and the passive suffix that it employs (Harlow, 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 681, |
|
"end": 695, |
|
"text": "(Harlow, 2007)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Austronesian", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The family of Dravidian languages comprises several languages which are primarily spoken across Southern India and Northern Sri Lanka, with over 200 million speakers. The shared task includes Kannada and Telugu. Dravidian languages primarily use the SOV word order. They are agglutinative, and primarily use suffixes. A Dravidian verb indicates voice, number, tense, aspect, mood and person, through the affixation of multiple suffixes. Nouns indicate number, gender and case. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dravidian", |
|
"sec_num": "3.4" |
|
}, |
|
|
{ |
|
"text": "Languages in the Indo-European family are native to most of Europe and a large part of Asia-with our sample including languages from the genera: Germanic, Indic, Iranian, and Romance. This is (arguably) the most well studied language family, containing a few of the highest-resource languages in the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indo-European", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "The Romance genus comprises of a set of fusional languages evolved from Latin. They traditionally originated in Southern and Southeastern Europe, though they are presently spoken in other continents such Africa and the Americas. Romance languages mark tense, person, number and mood in verbs, and gender and number in nouns. Inflection is primarily achieved through suffixes, with some verbal person syncretism and suppletion for high-frequency verbs. There is some morphological variation within the genus, such as French, which exhibits comparatively less inflection, and Romanian has comparatively more-it still marks case.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Romance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Germanic The Germanic genus comprises several languages which originated in Northern and Northwestern Europe, and today are spoken in many parts of the world. Verbs in Germanic languages mark tense and mood, in many languages person and number are also marked, predominantly through suffixation. Some Germanic languages exhibit widespread Indo-European ablaut. The gendering of nouns differs between Germanic languages: German nouns can be masculine, feminine or neuter, while English nouns are not marked for gender. In Danish and Swedish, historically masculine and feminine nouns have merged to form one common gender, so nouns are either common or neuter. Marking of case also differs between the languages: German nouns have one of four cases and this case is marked in articles and adjectives as well as nouns and pronouns, while English does not mark noun case (although Old English, which also appears in our language sample, does).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Romance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Indo-Iranian genus contains languages spoken in Iran and across the Indian subcontinent. Over 1.5 billion people worldwide speak an Indo-Iranian language. Within the Indo-European family, Indo-Iranian languages belong to the Satem group of languages. Verbs in Indo-Iranian languages indicate tense, aspect, mood, number and person. In languages such as Hindi verbs can also express levels of formality. Noun gender is present in some Indo-Iranian languages, such as Hindi, but absent in languages such as Persian. Nouns generally are marked for case. no grammatical evidentials.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indo-Iranian", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Oto-Manguean languages are a diverse family of tonal languages spoken in central and southern Mexico. Even though all of these languages are tonal, the tonal system within each language varies widely. Some have an inventory of two tones (e.g., Chichimec and Pame) others have ten tones (e.g., the Eastern Chatino languages of the Zapotecan branch, Palancar and L\u00e9onard (2016)).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Oto-Manguean", |
|
"sec_num": "3.7" |
|
}, |
|
{ |
|
"text": "Oto-Manguean languages are also rich in tonal morphology.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Oto-Manguean", |
|
"sec_num": "3.7" |
|
}, |
|
{ |
|
"text": "The inflectional system marks person-number and aspect in verbs and personnumber in adjectives and noun possessions, relying heavily on tonal contrasts. Other interesting as-pects of Oto-Manguean languages include the fact that pronominal inflections use a system of enclitics, and first and second person plural has a distinction between exclusive and inclusive (Campbell, 2016) . Tone marking schemes in the writing systems also vary greatly. Some writing systems do not represent tone, others use diacritics, and others represent tones with numbers. In languages that use numbers, single digits represent level tones and double digits represent contour tones. For example, in San Juan Quiahije of Eastern Chatino number 1 represents high tone, number 4 represents low tone, and numbers 14 represent a descending tone contour and numbers 42 represent an ascending tone contour Cruz (2014).", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 379, |
|
"text": "(Campbell, 2016)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Oto-Manguean", |
|
"sec_num": "3.7" |
|
}, |
|
{ |
|
"text": "The Sino-Tibetan family is represented by the Tibetan language. Tibetan uses an abugida script and contains complex syllabic components in which vowel marks can be added above and below the base consonant. Tibetan verbs are inflected for tense and mood. Previous studies on Tibetan morphology (Di et al., 2019) indicate that the majority of mispredictions produced by neural models are due to allomorphy. This is followed by generation of nonce words (impossible combinations of vowel and consonant components).", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 310, |
|
"text": "(Di et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan", |
|
"sec_num": "3.8" |
|
}, |
|
{ |
|
"text": "The Siouan languages are located in North America, predominantly along the Mississippi and Missouri Rivers and in the Ohio Valley. The family is represented in our task by Dakota, a critically endangered language spoken in North and South Dakota, Minnesota, and Saskatchewan. The Dakota language is largely agglutinating in its derivational morphology and fusional in its inflectional morphology with a mixed affixation system (Rankin et al., 2003) . The present task includes verbs, which are marked for first and second person, number, and duality. All three affixation types are found: person was generally marked by an infix, but could also appear as a prefix, and plurality was marked by a suffix. Morphophonological processes of fortition and vowel lowering are also present.", |
|
"cite_spans": [ |
|
{ |
|
"start": 427, |
|
"end": 448, |
|
"text": "(Rankin et al., 2003)", |
|
"ref_id": "BIBREF69" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Siouan", |
|
"sec_num": "3.9" |
|
}, |
|
{ |
|
"text": "The Songhay family consists of around eleven or twelve languages spoken in Mali, Niger, Benin, Burkina Faso and Nigeria. In the shared task we use Zarma, the most widely spoken Songhay language. Most of the Songhay languages are predominantly SOV with medium-sized consonant inventories (with implosives), five phonemic vowels, vowel length distinctions, and word level tones, which also are used to distinguish nouns, verbs, and adjectives (Heath, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 454, |
|
"text": "(Heath, 2014)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Songhay", |
|
"sec_num": "3.10" |
|
}, |
|
{ |
|
"text": "The Southern Daly is a small language family of the Northern Territory in Australia that consists of two distantly related languages. In the current task we only have one of the languages, Murrinh-patha (which was initially thought to be a language isolate). Murrinh-patha is classified as polysynthetic with highly complex verbal morphology. Verbal roots are surrounded by prefixes and suffixes that indicate tense, mood, object, subject. As Mansfield (2019) notes, Murrinh-patha verbs have 39 conjugation classes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Southern Daly", |
|
"sec_num": "3.11" |
|
}, |
|
{ |
|
"text": "Tungusic languages are spoken principally in Russia, China and Mongolia. In Russia they are concentrated in north and eastern Siberia and in China in the east, in Manchuria. The largest languages in the family are Xibe, Evenki and Even; we use Evenki in the shared task. The languages are of the agglutinating morphological type with a moderate number of cases, 7 for Xibe and 13 for Evenki. In addition to case markers, Evenki marks possession in nominals (including reflexive possession) and distinguishes between alienable and inalienable possession. In terms of morphophonological processes, the languages exhibit vowel harmony, consonant alternations and phonological vowel length.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tungusic", |
|
"sec_num": "3.12" |
|
}, |
|
{ |
|
"text": "Languages of the Turkic family are primarily spoken in Central Asia. The family is morphologically concatenative, fusional, and suffixing. Turkic languages generally exhibit back vowel harmony, with the notable exception of Uzbek. In addition to harmony in backness, several languages also have labial vowel harmony (e.g., Kyrgyz, Turkmen, among others). In addition, most of the languages have dorsal consonant allophony that accompanies back vowel harmony. Additional morphophonological processes include vowel epenthesis and voicing assimilation. Selection of the inflectional allomorph can frequently be determined from the infinitive morpheme (which frequently reveals vowel backness and roundedness) and also the final segment of the stem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Turkic", |
|
"sec_num": "3.13" |
|
}, |
|
{ |
|
"text": "The Uralic languages are spoken in Russia from the north of Siberia to Scandinavia and Hungary in Europe. They are agglutinating with some subgroups displaying fusional characteristics (e.g., the S\u00e1mi languages). Many of the languages have vowel harmony. The languages have almost complete suffixal morphology and a medium-sized case inventory, ranging from 5-6 cases to numbers in the high teens. Many of the larger case paradigms are made up of spatial cases, sometimes with distinctions for direction and position. Most of the languages have possessive suffixes, which can express possession, or agreement in non-finite clauses. The paradigms are largely regular, with few, if any, irregular forms. Many exhibit complex patterns of consonant gradation-consonant mutations that occur in specific morphological forms in some stems. Which gradation category a stem belongs to in often unpredictable. The languages spoken in Russia are typically SOV, while those in Europe have SVO order.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Uralic", |
|
"sec_num": "3.14" |
|
}, |
|
{ |
|
"text": "The Uto-Aztecan family is represented by the Tohono O'odham (Papago-Pima) language spoken along the US-Mexico border in southern Arizona and northern Sonora. O'odham is agglutinative with a mixed prefixing and suffixing system. Nominal and verbal pluralization is frequently realized by partial reduplication of the initial consonant and/or vowel, and occasionally by final consonant deletion or null affixation. Processes targeting vowel length (shortening or lengthening) are also present. A small number of verbs exhibit suppletion in the past tense.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Uto-Aztecan", |
|
"sec_num": "3.15" |
|
}, |
|
{ |
|
"text": "Similar to previous years, training and development sets contain triples consisting of a lemma, a target form, and morphosyntactic descriptions (MSDs, or morphological tags). 3 Test sets only contain two fields, i.e., target forms are omitted. All data follows UTF-8 encoding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Format", |
|
"sec_num": "4.1" |
|
}, |
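As a concrete illustration of the format just described, the sketch below reads a shared-task split into memory. It is not the official reader; the example rows in the comments are invented, and only the column layout (lemma, form, tags for train/dev; lemma, tags for test) follows the description above.

```python
# Minimal sketch (not the official reader) for task-0 style TSV files.
# Train/dev lines look like "<lemma>\t<inflected form>\t<tags>", e.g. "walk\twalked\tV;PST".
# Test lines omit the target form, leaving "<lemma>\t<tags>".
from typing import List, Optional, Tuple


def read_split(path: str, has_form: bool = True) -> List[Tuple[str, Optional[str], str]]:
    triples = []
    with open(path, encoding="utf-8") as f:  # all task data is UTF-8
        for line in f:
            line = line.rstrip("\n")
            if not line:
                continue
            fields = line.split("\t")
            if has_form:
                lemma, form, tags = fields
            else:
                lemma, tags = fields
                form = None
            triples.append((lemma, form, tags))
    return triples
```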
|
{ |
|
"text": "A significant amount of data for this task was extracted from corresponding (language-specific) grammars. In order to allow cross-lingual comparison, we manually converted their features (tags) into the UniMorph format (Sylak-Glassman, 2016). We then canonicalized the converted language data 4 to make sure all tags are consistently ordered and no category (e.g., \"Number\") is assigned two tags (e.g., singular and plural). 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conversion and Canonicalization", |
|
"sec_num": "4.2" |
|
}, |
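For concreteness, the following toy sketch shows the kind of check canonicalization performs: tags are reordered into a fixed dimension order, and a cell is rejected if one category receives two values. The tag-to-dimension mapping and the ordering are assumptions for illustration only; the shared task used the UniMorph canonicalization tooling linked above.

```python
# Toy illustration only; real UniMorph has far more tags and dimensions than shown here.
DIMENSION_OF = {"N": "POS", "V": "POS", "PST": "Tense", "PRS": "Tense",
                "NOM": "Case", "ACC": "Case", "SG": "Number", "PL": "Number"}
CANONICAL_ORDER = ["POS", "Tense", "Case", "Number", "Other"]  # assumed ordering


def canonicalize(tag_string: str) -> str:
    tags = tag_string.split(";")
    seen = {}
    for tag in tags:
        dim = DIMENSION_OF.get(tag, "Other")
        if dim != "Other" and dim in seen:
            # e.g. both SG and PL on one form would be rejected here
            raise ValueError(f"category {dim} assigned two tags: {seen[dim]} and {tag}")
        seen[dim] = tag
    # consistent ordering: sort tags by the canonical position of their dimension
    return ";".join(sorted(tags, key=lambda t: CANONICAL_ORDER.index(DIMENSION_OF.get(t, "Other"))))


# canonicalize("PST;V;SG")  ->  "V;PST;SG"
```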
|
{ |
|
"text": "We use only noun, verb, and adjective forms to construct training, development, and evaluation sets. We de-duplicate annotations such that there are no multiple examples of exact lemma-formtag matches. To create splits, we randomly sample 70%, 10%, and 20% for train, development, and test, respectively. We cap the training set size to 100k examples for each language; where languages exceed this (e.g., Finnish), we subsample to this point, balancing lemmas such that all forms for a given lemma are either included or discarded. Some languages such as Zarma (dje), Tajik (tgk), Lingala (lin), Ludian* (lud), M\u0101ori (mao), Sotho (sot), V\u00f5ro (vro), Anglo-Norman (xno), and Zulu (zul) contain less than 400 training samples and are extremely low-resource. 6 Tab. 6 and Tab. 7 in the Appendix provide the number of samples for every language in each split, the number of samples per lemma, and statistics on inconsistencies in the data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 755, |
|
"end": 756, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Splitting", |
|
"sec_num": "4.3" |
|
}, |
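A minimal sketch of the splitting procedure just described, under the assumption that the 100k training cap is enforced by keeping or discarding whole lemmas; the organizers' exact sampling code may differ.

```python
import random


def make_splits(triples, seed=0, cap=100_000):
    """Sketch of the 70/10/20 split with a lemma-balanced training cap."""
    rng = random.Random(seed)
    data = list(triples)
    rng.shuffle(data)
    n = len(data)
    train = data[: int(0.7 * n)]
    dev = data[int(0.7 * n): int(0.8 * n)]
    test = data[int(0.8 * n):]

    if len(train) > cap:
        # Subsample whole lemmas: all forms of a lemma are kept or all are discarded.
        by_lemma = {}
        for example in train:
            by_lemma.setdefault(example[0], []).append(example)
        lemmas = sorted(by_lemma)
        rng.shuffle(lemmas)
        train, total = [], 0
        for lemma in lemmas:
            if total + len(by_lemma[lemma]) > cap:
                continue
            train.extend(by_lemma[lemma])
            total += len(by_lemma[lemma])
    return train, dev, test
```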
|
{ |
|
"text": "The organizers provided two types of pre-trained baselines. Their use was optional.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Systems", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The first baseline was a non-neural system that had been used as a baseline in earlier shared tasks on morphological reinflection (Cotterell et al., 2017 (Cotterell et al., , 2018 . The system first heuristically extracts lemma-to-form transformations; it assumes that these transformations are suffix-or prefix-based. 4 Using the UniMorph schema canonicalization script https://github.com/unimorph/umcanonicalize 5 Conversion schemes and canonicalization scripts are available at https://github.com/ sigmorphon2020/task0-data A simple majority classifier is used to apply the most frequent suitable transformation to an input lemma, given the morphological tag, yielding the output form. See Cotterell et al. (2017) for further details.", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 153, |
|
"text": "(Cotterell et al., 2017", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 179, |
|
"text": "(Cotterell et al., , 2018", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 320, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 716, |
|
"text": "Cotterell et al. (2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Non-neural", |
|
"sec_num": "5.1" |
|
}, |
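The following is a simplified sketch of the idea behind this baseline, restricted to suffix rules for brevity (the actual baseline of Cotterell et al. (2017) also handles prefix rules and other details): extract one suffix rewrite per training pair, then apply the most frequent rule observed for the given tag bundle that matches the input lemma.

```python
from collections import Counter, defaultdict


def suffix_rule(lemma: str, form: str):
    # Longest common prefix; the remainders define a suffix rewrite rule.
    i = 0
    while i < min(len(lemma), len(form)) and lemma[i] == form[i]:
        i += 1
    return lemma[i:], form[i:]  # e.g. ("", "ed") for walk -> walked


class MajorityBaseline:
    def __init__(self):
        self.rules = defaultdict(Counter)  # tags -> Counter of (old_suffix, new_suffix)

    def train(self, triples):
        for lemma, form, tags in triples:
            self.rules[tags][suffix_rule(lemma, form)] += 1

    def predict(self, lemma: str, tags: str) -> str:
        for (old, new), _ in self.rules[tags].most_common():
            if lemma.endswith(old):  # most frequent applicable rule wins
                return lemma[: len(lemma) - len(old)] + new if old else lemma + new
        return lemma  # fall back to copying the lemma
```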
|
{ |
|
"text": "Neural baselines were based on a neural transducer (Wu and Cotterell, 2019) , which is essentially a hard monotonic attention model (mono-*). The second baseline is a transformer (Vaswani et al., 2017) adopted for character-level tasks that currently holds the state-of-the-art on the 2017 SIG-MORPHON shared task data (Wu et al., 2020, trm-*). Both models take the lemma and morphological tags as input and output the target inflection. The baseline is further expanded to include the data augmentation technique used by Anastasopoulos and Neubig (2019, -aug-) (conceptually similar to the one proposed by Silfverberg et al. 2017). Relying on a simple characterlevel alignment between lemma and form, this technique replaces shared substrings of length > 3 with random characters from the language's alphabet, producing hallucinated lemma-tag-form triples. Both neural baselines were trained in mono-(*-single) and multilingual (shared parameters among the same family, *-shared) settings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 75, |
|
"text": "(Wu and Cotterell, 2019)", |
|
"ref_id": "BIBREF82" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 336, |
|
"text": "(Wu et al., 2020,", |
|
"ref_id": "BIBREF83" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural", |
|
"sec_num": "5.2" |
|
}, |
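A rough sketch of the hallucination step described above: locate a substring of length greater than three shared by lemma and form and replace it with random characters from the language's alphabet, yielding a new lemma-tag-form triple. The alignment and sampling details of the actual -aug- baseline differ; the function and parameter names here are illustrative.

```python
import random


def hallucinate(lemma: str, form: str, tags: str, alphabet: str, min_len: int = 4,
                rng: random.Random = random.Random(0)):
    # Find the longest common substring of lemma and form (simple quadratic search).
    best = ("", -1, -1)
    for i in range(len(lemma)):
        for j in range(len(form)):
            k = 0
            while i + k < len(lemma) and j + k < len(form) and lemma[i + k] == form[j + k]:
                k += 1
            if k > len(best[0]):
                best = (lemma[i:i + k], i, j)
    shared, i, j = best
    if len(shared) < min_len:
        return None  # nothing long enough to replace
    fake = "".join(rng.choice(alphabet) for _ in shared)
    new_lemma = lemma[:i] + fake + lemma[i + len(shared):]
    new_form = form[:j] + fake + form[j + len(shared):]
    return new_lemma, new_form, tags
```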
|
{ |
|
"text": "As Tab. 3 shows, 10 teams submitted 22 systems in total, out of which 19 were neural. Some teams such as ETH Zurich and UIUC built their models on top of the proposed baselines. In particular, ETH Zurich enriched each of the (multilingual) neural baseline models with exact decoding strategy that uses Dijkstra's search algorithm. UIUC enriched the transformer model with synchronous bidirectional decoding technique (Zhou et al., 2019) in order to condition the prediction of an affix character on its environment from both sides. (The authors demonstrate positive effects in Oto-Manguean, Turkic, and some Austronesian languages.)", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 436, |
|
"text": "(Zhou et al., 2019)", |
|
"ref_id": "BIBREF87" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Competing Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A few teams further improved models that were among top performers in previous shared tasks. IMS and Flexica re-used the hard monotonic attention model from (Aharoni and Goldberg, 2017) . IMS developed an ensemble of two models (with left-to-right and right-to-left generation or-der) with a genetic algorithm for ensemble search (Haque et al., 2016) and iteratively provided hallucinated data. Flexica submitted two neural systems. The first model (flexica-02-1) was multilingual (family-wise) hard monotonic attention model with improved alignment strategy. This model is further improved (flexica-03-1) by introducing a data hallucination technique which is based on phonotactic modelling of extremely low-resource languages (Shcherbakov et al., 2016) . LTI focused on their earlier model (Anastasopoulos and Neubig, 2019) , a neural multi-source encoder-decoder with two-step attention architecture, training it with hallucinated data, cross-lingual transfer, and romanization of scripts to improve performance on low-resource languages. DeepSpin reimplemented gated sparse two-headed attention model from Peters and Martins (2019) and trained it on all languages at once (massively multilingual). The team experimented with two modifications of the softmax function: sparsemax (Martins and Astudillo, 2016, deepspin-02-1) and 1.5-entmax , deepspin-01-1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 185, |
|
"text": "(Aharoni and Goldberg, 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 728, |
|
"end": 754, |
|
"text": "(Shcherbakov et al., 2016)", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 825, |
|
"text": "(Anastasopoulos and Neubig, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1282, |
|
"end": 1311, |
|
"text": "(Martins and Astudillo, 2016,", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Competing Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Many teams based their models on the transformer architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Competing Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "NYU-CUBoulder experimented with a vanilla transformer model (NYU-CUBoulder-04-0), a pointer-generator transformer that allows for a copy mechanism (NYU-CUBoulder-02-0), and ensembles of three (NYU-CUBoulder-01-0) and five (NYU-CUBoulder-03-0) pointer-generator transformers. For languages with less than 1,000 training samples, they also generate hallucinated data. CULing developed an ensemble of three (monolingual) transformers with identical architecture but different input data format. The first model was trained on the initial data format (lemma, target tags, target form). For the other two models the team used the idea of lexeme's principal parts (Finkel and Stump, 2007) and augmented the initial input (that only used the lemma as a source form) with entries corresponding to other (non-lemma) slots available for the lexeme. The CMU Tartan team compared performance of models with transformer-based and LSTM-based encoders and decoders. The team also compared monolingual to multilingual training in which they used several (related and unrelated) high-resource languages for low-resource language training. were neural, some teams experimented with nonneural approaches showing that in certain scenarios they might surpass neural systems. A large group of researchers from CU7565 manually developed finite-state grammars for 25 languages (CU7565-01-0). They additionally developed a non-neural learner for all languages (CU7565-02-0) that uses hierarchical paradigm clustering (based on similarity of string transformation rules between inflectional slots). Another team, Flexica, proposed a model (flexica-01-0) conceptually similar to Hulden et al. (2014) , although they did not attempt to reconstruct the paradigm itself and treated transformation rules independently assigning each of them a score based on its frequency and specificity as well as diversity of the characters surrounding the pattern. 7", |
|
"cite_spans": [ |
|
{ |
|
"start": 658, |
|
"end": 682, |
|
"text": "(Finkel and Stump, 2007)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1652, |
|
"end": 1672, |
|
"text": "Hulden et al. (2014)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Competing Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This year, we instituted a slightly different evaluation regimen than in previous years, which takes into account the statistical significance of differences between systems and allows for an informed comparison across languages and families better than a simple macro-average.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The process works as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "1. For each language, we rank the systems according to their accuracy (or Levenshtein distance). To do so, we use paired bootstrap resampling (Koehn, 2004) 8 to only take statistically significant differences into account. That way, any system which is the same (as assessed via statistical significance) as the best performing one is also ranked 1 st for that language. re-rank them based on the amount of times they ranked 1 st , 2 nd , 3 rd , etc. Table 4 illustrates an example of this process using four Zapotecan languages and six systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 155, |
|
"text": "(Koehn, 2004)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 458, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
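A sketch of the pairwise significance test underlying step 1, assuming per-example 0/1 correctness vectors for two systems. The number of resamples and the threshold below are illustrative choices, not necessarily the organizers' settings.

```python
import random


def a_significantly_beats_b(correct_a, correct_b, n_resamples=1000, alpha=0.05, seed=0):
    """Paired bootstrap resampling (Koehn, 2004) over per-item correctness flags."""
    rng = random.Random(seed)
    n = len(correct_a)
    wins_a = 0
    for _ in range(n_resamples):
        idx = [rng.randrange(n) for _ in range(n)]  # resample test items with replacement
        acc_a = sum(correct_a[i] for i in idx) / n
        acc_b = sum(correct_b[i] for i in idx) / n
        if acc_a > acc_b:
            wins_a += 1
    p = 1.0 - wins_a / n_resamples  # approx. probability that A is not better than B
    return p < alpha                # True if A is significantly better; otherwise tie A with B
```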
|
{ |
|
"text": "uiuc (1) CULing (1) deepspin (1) NYU-CUB (1) uiuc 1 4 trm-single (1) uiuc (1) uiuc (1) CULing (1) trm-single 1 4 CULing (3) trm-single (1) IMS (1) deepspin (1) CULing 1.5 3 1 deepspin (3) IMS (4) NYU-CUB (1) uiuc (1) deepspin 2.25 2 1 1 NYU-CUB (3) deepspin (4) CULing (1) trm-single (1) NYU-CUB 2.25 2 1 1 IMS (6) NYU-CUB (4) trm-single (1) IMS (1) IMS 3 2 0 1 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "This year we had four winning systems (i.e., ones that outperform the best baseline): CULing-01-0, deepspin-02-1, uiuc-01-0, and deepspin-01-1, all neural. As Tab. 5 shows, they achieve over 90% accuracy. Although CULing-01-0 and uiuc-01-0 are both monolingual transformers that do not use any hallucinated data, they follow different strategies to improve performance. The strategy proposed by CULing-01-0 of enriching the input data with extra entries that included non-lemma forms and their tags as a source form, enabled their system to be among top performers on all language families; uiuc-01-0, on the other hand, did not modify the data but rather changed the decoder to be bidirectional and made family-wise fine-tuning of each (monolingual) model. The system is also among the top performers on all language families except Iranian. The third team, DeepSpin, trained and fine-tuned their models on all language data. Both models are ranked high (although the sparsemax model, deepspin-02-1, performs better overall) on most language groups with exception of Algic. Sparsemax was also found useful by CMU-Tartan. The neural ensemble model with data augmentation from IMS team shows superior performance on languages with smaller data sizes (under 10,000 samples). LTI and Flexica teams also observed positive effects of multilingual training and data hallucination on low-resource languages. The latter was also found useful in the ablation study made by NYU-CUBoulder team. Several teams aimed to address particular research questions; we will further summarize their results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Rank Acc Is developing morphological grammars manually worthwhile? This was the main question asked by CU7565 who manually designed finitestate grammars for 25 languages. Paradigms of some languages were relatively easy to describe but neural networks also performed quite well on them even with a limited amount of data. For lowresource languages such as Ingrian and Tagalog the grammars demonstrate superior performance but this comes at the expense of a significant amount of person-hours.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "What is the best training strategy for lowresource languages? Teams that generated hallucinated data highlighted its utility for lowresource languages. Augmenting the data with tuples where lemmas are replaced with nonlemma forms and their tags is another technique that was found useful. In addition, multilingual training and ensembles yield extra gain in terms of accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To address this question, we evaluate oracle scores for baseline systems, submitted systems, and all of them together. Typically, as Tables 8-21 in the Appendix demonstrate, the baselines and the submissions are complementary -adding them together increases the oracle score. Furthermore, while the full systems tend to dominate the partial systems (that were designed for a subset of languages, such as CU7565-01-0), there are a number of cases where the partial systems find the solution when the full systems don't -and these languages often then get even bigger gains when combined with the baselines. This even happens when the accuracy of the baseline is very high -Finnish has baseline oracle of 99.89; full systems oracle of 99.91; submission oracle of 99.94 and complete oracle of 99.96, so an ensemble might be able to improve on the results. The largest gaps in oracle systems are observed in Algic, Oto-Manguean, Sino-Tibetan, Southern Daly, Tungusic, and Uto-Aztecan families. 9", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Are the systems complementary?", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Has morphological inflection become a solved problem in certain scenarios? The results shown in Fig. 2 suggest that for some of the development language families, such as Austronesian and Niger-Congo, the task was relatively easy, with most systems achieving high accuracy, whereas the task was more difficult for Uralic and Oto-Manguean languages, which showed greater variability in level of performance across submitted systems. Languages such as Ludic (lud), Norwegian Nynorsk (nno), Middle Low German 1 3 6 7 1 5 0 9 2 3 3 9 1 0 8 1 3 4 6 4 5 9 0 1 5 8 5 7 3 9 9 8 8 7 0 1 6 5 1 1 7 9 9 1 9 6 2 4 8 2 3 8 9 5 3 9 7 4 5 4 4 4 2 9 3 3 3 1 3 5 9 6 2 1 1 5 2 0 2 2 6 0 2 7 0 1 7 5 8 4 6 6 3 1 1 3 4 9 1 2 3 5 0 9 8 0 2 4 8 4 4 7 1 6 7 0 4 1 4 9 1 6 3 7 2 1 3 3 7 6 2 2 9 4 0 1 4 5 6 2 7 2 3 8 7 6 9 4 9 2 0 1 4 8 4 1 1 1 0 9 4 0 1 4 1 2 1 0 8 1 7 2 5 7 8 3 0 2 7 4 6 2 5 5 1 2 5 4 4 2 3 0 1 6 3 5 7 7 5 0 6 7 2 0 6 7 0 3 2 4 3 5 1 5 4 5 1 6 8 6 8 8 1 1 2 4 1 7 2 3 8 4 2 3 6 1 0 3 6 8 3 5 3 9 7 4 8 2 0 8 9 1 0 8 5 1 5 5 5 9 0 8 1 3 4 8 2 4 4 1 7 7 1 4 4 9 9 6 8 4 1 3 0 6 3 5 3 9 3 1 2 6 3 5 0 2 0 9 7 3 6 2 6 8 0 4 3 4 9 5 9 6 8 5 2 2 8 4 2 2 4 4 4 0 3 6 4 2 7 5 9 8 0 2 4 9 3 9 0 2 9 3 3 6 4 4 1 9 8 9 0 9 3 7 3 2 2 1 6 4 7 2 5 7 8 6 7 7 8 6 1 4 6 8 2 2 7 4 7 1 8 5 3 4 8 6 4 9 3 6 4 3 (gml), Evenki (evn), and O'odham (ood) seem to be the most challenging languages based on simple accuracy. For a more fine-grained study, we have classified test examples into four categories: \"very easy\", \"easy\", \"hard\", and \"very hard\". \"Very easy\" examples are ones that all submitted systems got correct, while \"very hard\" examples are ones that no submitted system got correct. \"Easy\" examples were predicted correctly for 80% of systems, and \"hard\" were only correct in 20% of systems. Fig. 3, Fig. 4 , and Fig. 5 represent percentage of noun, verb, and adjective samples that fall into each category and illustrate that most language samples are correctly predicted by majority of the systems. For noun declension, Old English (ang), Middle Low German (gml), Evenki (evn), O'odham (ood), V\u00f5ro (vro) are the most difficult (some of this difficulty comes from language data inconsistency, as described in the following section). For adjective declension, Classic Syriac presents the highest difficulty (likely due to its limited data).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 102, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1782, |
|
"end": 1796, |
|
"text": "Fig. 3, Fig. 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 1803, |
|
"end": 1809, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Are the systems complementary?", |
|
"sec_num": null |
|
}, |
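The bucketing just described can be computed directly from per-system correctness flags. The handling of items between the 20% and 80% thresholds is not specified in the text, so the "medium" label below is an assumption.

```python
def difficulty(correct_flags):
    """correct_flags: list of 0/1 values, one per submitted system, for one test item."""
    share = sum(correct_flags) / len(correct_flags)
    if share == 1.0:
        return "very easy"   # every submitted system got it right
    if share == 0.0:
        return "very hard"   # no submitted system got it right
    if share >= 0.8:
        return "easy"
    if share <= 0.2:
        return "hard"
    return "medium"          # assumed label for items between the two thresholds
```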
|
|
{ |
|
"text": "In our error analysis we follow the error type taxonomy proposed in Gorman et al. (2019) . First, we evaluate systematic errors due to inconsistencies in the data, followed by an analysis of whether having seen the language or its family improved accuracy. We then proceed with an overview of accuracy for each of the language families. For a select number of families, we provide a more detailed analysis of the error patterns. Tab. 6 and Tab. 7 provide the number of samples in the training, development, and test sets, percentage of inconsistent entries (the same lemma-tag pair has multiple infected forms) in them, percentage of contradicting entries (same lemma-tag pair occurring in train and development or test sets but assigned to different inflected forms), and percentage of entries in the development or test sets containing a lemma observed in the training set. The train, development and test sets contain 2%, 0.3%, and 0.6% inconsistent entries, respectively. Azerbaijani (aze), Old English (ang), Cree (cre), Danish (dan), Middle Low German (gml), Kannada (kan), Norwegian Bokm\u00e5l (nob), Chichimec (pei), and Veps (vep) had the highest rates of inconsistency. These languages also exhibit the highest percentage of contradicting entries. The inconsistencies in some Finno-Ugric languages (such as Veps and Ludic) are due to dialectal variations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 88, |
|
"text": "Gorman et al. (2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "The overall accuracy of system and language pairings appeared to improve with an increase in the size of the dataset ( Fig. 6 ; see also Fig. 7 for accuracy trends by language family and Fig. 8 for accuracy trends by system). Overall, the variance was considerable regardless of whether the language family or even the language itself had been observed during the Development Phase. A linear mixed-effects regression was used to assess variation in accuracy using fixed effects of language category, the size of the training dataset (log count), and their interactions, as well as random intercepts for system and language family accuracy. 10 Language category was sum-coded with three levels: development language-development family, surprise language-development family, or surprise language-surprise family.", |
|
"cite_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 642, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 125, |
|
"text": "Fig. 6", |
|
"ref_id": "FIGREF7" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 143, |
|
"text": "Fig. 7", |
|
"ref_id": "FIGREF9" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 193, |
|
"text": "Fig. 8", |
|
"ref_id": "FIGREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "A significant effect of dataset size was observed, such that a one unit increase in log count corresponded to a 2% increase in accuracy (\u03b2 = 0.019, p < 0.001). Language category type also significantly influenced accuracy: both development languages and surprise languages from development families were less accurate on average (\u03b2 dev\u2212dev = -0.145, \u03b2 sur\u2212dev = -0.167, each p < 0.001). These main effects were, however, significantly modulated by interactions with dataset size: on top of the main effect of dataset size, accuracy for development languages increased an additional \u2248 1.7% (\u03b2 dev\u2212dev\u00d7size = 0.017, p < 0.001) and accuracy for surprise languages from development families increased an additional \u2248 2.9% (\u03b2 sur\u2212dev\u00d7size = 0.029, p < 0.001).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "9" |
|
}, |
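For readers less familiar with sum-coded interactions, here is a worked reading of the coefficients quoted above. The slope for the surprise-language, surprise-family level is not reported directly; the last line derives it from the sum-to-zero constraint of the coding and should be read as an inference, not a reported result.

# Effective accuracy-vs-log-count slopes per language category,
# computed from the coefficients quoted in the text above.
beta_size    = 0.019   # main effect of log training-set size
beta_dd_size = 0.017   # interaction: development language, development family
beta_sd_size = 0.029   # interaction: surprise language, development family
beta_ss_size = -(beta_dd_size + beta_sd_size)  # implied by sum coding: -0.046

print("dev language, dev family slope:     ", beta_size + beta_dd_size)  # ~0.036
print("surprise language, dev family slope:", beta_size + beta_sd_size)  # ~0.048
print("surprise language, surprise family: ", beta_size + beta_ss_size)  # ~-0.027 (implied)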
|
{ |
|
"text": "This family was represented by three languages. Mean accuracy across systems was above average at 91.7%. Relative to other families, variance in accuracy was low, but nevertheless ranged from 41.1% to 99.0%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Afro-Asiatic:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Algic: This family was represented by one language, Cree. Mean accuracy across systems was below average at 65.1%. Relative to other families, variance in accuracy was low, ranging from 41.5% to 73%. All systems appeared to struggle with the choice of preverbal auxiliary. Some auxiliaries were overloaded: 'kitta' could refer to future, imperfective, or imperative. The morphological features for mood and tense were also frequently combined, such as SBJV+OPT (subjunctive plus optative mood). While the paradigms were very large, there were very few lemmas (28 impersonal verbs and 14 transitive verbs), which may have contributed to the lower accuracy. Interestingly, the inflections could largely be generated by rules. 11", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Afro-Asiatic:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Austronesian: This family was represented by five languages. Mean accuracy across systems was around average at 80.5%. Relative to other families, variance in accuracy was high, with accuracy ranging from 39.5% to 100%. One may notice a discrepancy among the difficulty in processing different Austronesian languages. For instance, we see a difference of over 10% in the baseline performance of Cebuano (84%) and Hiligaynon (96%). 12 This could come from the fact that Cebuano only has partial reduplication while Hiligaynon has full reduplication. Furthermore, the prefix choice for Cebuano is more irregular, making it more difficult to predict the correct conjugation of the verb.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Afro-Asiatic:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Dravidian: This family was represented by two languages: Kannada and Telugu. Mean accuracy across systems was around average at 82.2%. Relative to other families, variance in accuracy was high: system accuracy ranged from 44.6% to 96.0%. Accuracy for Telugu was systematically higher than accuracy for Kannada.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Afro-Asiatic:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This family was represented by 29 languages and four main branches. Mean accuracy across systems was slightly above average at 86.9%. Relative to other families, variance in accuracy was very high: system accuracy ranged from 0.02% to 100%. For Indo-Aryan, mean accuracy was high (96.0%) with low variance; for Germanic, mean accuracy was slightly below average (79.0%) but with very high variance (ranging from 0.02% to 99.5%), for Romance, mean accuracy was high (93.4%) but also had a high variance (ranging from 23.5% to 99.8%), and for Iranian, mean accuracy was high (89.2%), but again with a high variance (ranging from 25.0% to 100%). Languages from the Germanic branch of the Indo-European family were included in the Development Phase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indo-European:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Niger-Congo: This family was represented by ten languages. Mean accuracy across systems was very good at 96.4%. Relative to other families, variance in accuracy was low, with accuracy ranging from 62.8% to 100%. Most languages in this family are considered low resource, and the resources used for data gathering may have been biased towards the languages' regular forms, as such this high accuracy may not be representative of the \"easiness\" of the task in this family. Languages from the Niger-Congo family was included in the Development Phase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indo-European:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Oto-Manguean: This family was represented by nine languages. Mean accuracy across systems was slightly below average at 78.5%. Relative to other families, variance in accuracy was high, with accuracy ranging from 18.7% to 99.1%. Languages from the Oto-Manguean family were included in the Development Phase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indo-European:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This family was represented by one language, Bodic. Mean accuracy across systems was average at 82.1%, and variance across systems was also very low. Accuracy ranged from 67.9% to 85.1%. The results are similar to those in Di et al. (2019) where majority of errors relate to allomorphy and impossible combinations of Tibetan unit components. above average at 89.4%, and variance across systems was also low, despite the range from 0% to 95.7%. Dakota presented variable prefixing and infixing of person morphemes, along some complexities related to fortition processes. Determining the factor(s) that governed variation in affix position was difficult from a linguist's perspective, though many systems were largely successful. Success varied in the choice of the first or second person singular allomorphs which had increasing degrees of consonant strengthening (e.g., /wa/, /ma/, /mi/ /bde/, /bdu/ for the first person singular and /ya/, /na/, /ni/, /de/, or /du/ for the second person singular). In some cases, these fortition processes were overapplied, and in some cases, entirely missed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 239, |
|
"text": "Di et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Songhay: This family was represented by one language, Zarma. Mean accuracy across systems was above average at 88.6%, and variance across systems was relatively high. Accuracy ranged from 0% to 100%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Southern Daly: This family was represented by one language, Murrinh-Patha. Mean accuracy across systems was below average at 73.2%, and variance across systems was relatively high. Accuracy ranged from 21.2% to 91.9%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Tungusic: This family was represented by one language, Evenki. The overall accuracy was the lowest across families. Mean accuracy was 53.8% with very low variance across systems. Accuracy ranged from 43.5% to 59.0%. The low accuracy is due to several factors. Firstly and primarily, the dataset was created from oral speech samples in various dialects of the language. The Evenki language is known to have rich dialectal variation. Moreover, there was little attempt at any standardization in the oral speech transcription. These peculiarities led to a high number of errors. For instance, some of the systems synthesized a wrong plural form for a noun ending in /-n/. Depending on the dialect, it can be /-r/ or /-l/, and there is a trend to have /-hVl/ for borrowed nouns. Deducing such a rule as well as the fact that the noun is a loanword is a hard task. Other suffixes may also have variable forms (such as /-kVllu/ vs /-kVldu/ depending on the dialect for the 2PL imperative. Some verbs have irregular past tense forms depending on the dialect and the meaning of the verb (e. g. /o:-/ 'to make' and 'to become'). Next, various dialects exhibit various vowel and consonant changes in suffixes. For example, some dialects (but not all of them) change /w/ to /b/ after /l/, and the systems sometimes synthesized a wrong form. The vowel harmony is complex: not all suffixes obey it, and it is also dialect-dependent. Some suffixes have variants (e. g., /-sin/ and /-s/ for SEMEL (semelfactive)), and the choice between them might be hard to understand. Finally, some of the mistakes are due to the markup scheme scarcity. For example, various past tense forms are all annotated as PST, or there are several comitative suffixes all annotated as COM. Moreover, some features are present in the word form but they receive no annotation at all. It is worth mentioning that some of the predictions could theoretically be possible. To sum up, the Evenki case presents the chal-lenges of oral non-standardized speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Turkic: This family was represented by nine languages. Mean accuracy across systems was relatively high at 93%, and relative to other families, variance across systems was low. Accuracy ranged from 51.5% to 100%. Accuracy was lower for Azerbaijani and Turkmen, which after closer inspection revealed some slight contamination in the 'gold' files. There was very marginal variation in the accuracy for these languages across systems. Besides these two, accuracies were predominantly above 98%. A few systems struggled with the choice and inflection of the postverbal auxiliary in various languages (e.g., Kyrgyz, Kazakh, and Uzbek).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Uralic: This family was represented by 16 languages. Mean accuracy across systems was average at 81.5%, but the variance across systems and languages was very high. Accuracy ranged from 0% to 99.8%. Languages from the Uralic family were included in the Development Phase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Uto-Aztecan: This family was represented by one language, O'odham. Mean accuracy across systems was slightly below average at 76.4%, but the variance across systems and languages was fairly low. Accuracy ranged from 54.8% to 82.5%. The systems with higher accuracy may have benefited from better recall of suppletive forms relative to lower accuracy systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sino-Tibetan:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This years's shared task on morphological reinflection focused on building models that could generalize across an extremely typologically diverse set of languages, many from understudied language families and with limited available text resources. As in previous years, neural models performed well, even in relatively low-resource cases. Submissions were able to make productive use of multilingual training to take advantage of commonalities across languages in the dataset. Data augmentation techniques such as hallucination helped fill in the gaps and allowed networks to generalize to unseen inputs. These techniques, combined with architecture tweaks like sparsemax, resulted in excellent overall performance on many languages (over 90% accuracy on average). However, the task's focus on typological diversity revealed that some morphology types and language families (Tungusic, Oto-Manguean, South-ern Daly) remain a challenge for even the best systems. These families are extremely low-resource, represented in this dataset by few or a single language. This makes cross-linguistic transfer of similarities by multilanguage training less viable. They may also have morphological properties and rules (e.g., Evenki is agglutinating with many possible forms for each lemma) that are particularly difficult for machine learners to induce automatically from sparse data. For some languages (Ingrian, Tajik, Tagalog, Zarma, and Lingala), optimal performance was only achieved in this shared task by hand-encoding linguist knowledge in finite state grammars. It is up to future research to imbue models with the right kinds of linguistic inductive biases to overcome these challenges. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
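As an illustration of the hallucination-style augmentation mentioned above (in the spirit of Anastasopoulos and Neubig, 2019), the sketch below swaps the shared stem of a (lemma, form) pair for a random string while keeping the affixes intact. The longest-common-substring stem detection and the alphabet argument are simplifying assumptions, not any particular team's procedure.

import random
from difflib import SequenceMatcher

def hallucinate(lemma, form, tags, alphabet, rng=random.Random(0), min_stem=3):
    # Treat the longest shared substring of lemma and form as the stem.
    match = SequenceMatcher(None, lemma, form).find_longest_match(0, len(lemma), 0, len(form))
    if match.size < min_stem:
        return None  # no reliable stem to replace
    fake_stem = "".join(rng.choice(alphabet) for _ in range(match.size))
    new_lemma = lemma[:match.a] + fake_stem + lemma[match.a + match.size:]
    new_form = form[:match.b] + fake_stem + form[match.b + match.size:]
    return new_lemma, new_form, tags

# Hypothetical usage on a Finnish-like pair:
# hallucinate("talo", "talossa", "N;IN+ESS;SG", "abcdefghijklmnoprstuvy")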
|
{ |
|
"text": "Rank Acc deepspin-02-1 2.3 92.9 uiuc-01-0 3.1 91.6 deepspin-01-1 2.9 92.9 BASE: trm-single 2.9 91.7 CULing-01-0 3.9 93.5 BASE: trm-aug-single 3.4 92.9 NYU-CUBoulder-04-0 7.3 90.7 BASE: trm-shared 12.0 86.9 cmu_tartan_00-1 8.1 88.6 BASE: mono-shared 8.9 90.3 NYU-CUBoulder-03-0 10.0 91.2 cmu_tartan_00-0 8.9 88.5 NYU-CUBoulder-02-0 11.4 90.6 BASE: mono-aug-shared 12.9 90.5 NYU-CUBoulder-01-0 12.4 90.4 BASE: mono-single 8.1 88.0 BASE: mono-aug-single 7.9 91.9 cmu_tartan_01-0 10.5 88.6 cmu_tartan_01-1 9.9 88.5 IMS-00-0 15.9 90.4 cmu_tartan_02-1 10.7 88. 1.8 92.0 CULing-01-0 3.5 91.9 deepspin-02-1 6.7 91.3 deepspin-01-1 6.7 91.1 NYU-CUBoulder-04-0 5.5 90.4 BASE: mono-single 5.1 90.9 NYU-CUBoulder-02-0 6.8 90.6 NYU-CUBoulder-03-0 6.8 90.5 cmu_tartan_01-1 7.2 91.0 cmu_tartan_00-1 6.6 90.8 BASE: mono-aug-single 7.3 90.7 BASE: trm-shared 7.7 91.3 cmu_tartan_02-1 7.4 90.8 NYU-CUBoulder-01-0 8.9 90.5 BASE: trm-aug-shared 9.3 91.1 cmu_tartan_00-0 9.7 90.9 cmu_tartan_01-0 11.8 90.7 ETHZ-00-1 16.6 88.9 IMS-00-0 11.2 91.0 BASE: mono-shared 15.1 88.9 flexica-02-1 13.1 89.7 LTI-00-1 17.1 83.3 flexica-03-1 17.0 88.6 BASE: mono-aug-shared 19.5 86.3 CU7565-02-0 21.6 85.9 ETHZ-02-1 17.5 88.6 *CU7565-01-0 29.1 96.4 flexica-01-1 28.9 72.4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "Rank Acc uiuc-01-0 1.0 82.5 NYU-CUBoulder-01-0 1.0 82.2 NYU-CUBoulder-02-0 1.0 81.8 NYU-CUBoulder-03-0 1.0 81.5 IMS-00-0 1.0 81.5 BASE: trm-single 1.0 80.9 CULing-01-0 1.0 80.9 BASE: trm-shared 1.0 80.9 deepspin-02-1 1.0 80.6 NYU-CUBoulder-04-0 1.0 79.6 ETHZ-00-1 1.0 79.3 LTI-00-1 1.0 79.0 deepspin-01-1 1.0 79.0 BASE: trm-aug-single 14.0 78.0 BASE: trm-aug-shared 14.0 78.0 flexica-02-1 14.0 77.7 BASE: mono-aug-single 14.0 77.4 BASE: mono-aug-shared 14.0 77.4 cmu_tartan_00-0 14.0 76.1 cmu_tartan_00-1 14. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc uiuc-01-0 1.0 95.6 CULing-01-0 1.0 94.9 deepspin-02-1 5.0 93.3 BASE: trm-single 5.5 93.9 BASE: trm-aug-single 5.5 93.1 deepspin-01-1 5.5 92.5 NYU-CUBoulder-01-0 5.5 92.4 NYU-CUBoulder-02-0 5.5 92.3 NYU-CUBoulder-04-0 14.0 92.0 BASE: mono-aug-shared 9.0 91.3 BASE: mono-single 9.0 90.2 cmu_tartan_00-0 9.0 90.0 cmu_tartan_01-1 13.5 85.4 cmu_tartan_01-0 13.5 85.2 cmu_tartan_02-1 14.5 72.3 ETHZ-00-1 9.5 92.5 BASE: trm-aug-shared 9.5 91.8 BASE: trm-shared 9.5 91.7 IMS-00-0 9.5 91.7 BASE: mono-aug-single 9.5 90.9 CU7565-02-0 9.5 90.6 NYU-CUBoulder-03-0 18.0 91.2 LTI-00-1 13.5 90.1 flexica-02-1 13.5 90.1 ETHZ-02-1 13.5 89.5 flexica-03-1 13.5 89.2 cmu_tartan_00-1 13.5 89.0 BASE: mono-shared 13.5 88. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc uiuc-01-0 1.0 98.2 deepspin-02-1 1.5 98.1 deepspin-01-1 1.5 98.0 BASE: trm-single 1.5 97.9 BASE: trm-aug-single 1.5 97.8 BASE: trm-shared 2.8 97.9 CULing-01-0 7.5 98.0 BASE: mono-single 6.0 97.6 NYU-CUBoulder-04-0 5.0 97.7 cmu_tartan_02-1 7.8 97.4 cmu_tartan_00-1 7.0 97.4 BASE: mono-shared 7.0 97.3 cmu_tartan_01-1 7.8 97.3 cmu_tartan_00-0 8.8 97.1 NYU-CUBoulder-03-0 8.5 97.4 NYU-CUBoulder-02-0 9.2 97.4 NYU-CUBoulder-01-0 9.2 97.3 BASE: trm-aug-shared 11.0 97.7 BASE: mono-aug-single 9.5 97.2 flexica-03-1 9.5 97.1 flexica-02-1 11.0 96.8 ETHZ-02-1 11.5 97.4 ETHZ-00-1 13.8 96. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that many languages exhibit no inflectional morphology e.g., Mandarin Chinese, Yoruba, etc.:Bickel and Nichols (2013a).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The data splits are available at https://github.com/ sigmorphon2020/task0-data/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Each MSD is a set of features separated by semicolons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also note that Ludian contained inconsistencies in data due to merge of various dialects.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "English plural noun formation rule \"* \u2192 *s\" has high diversity whereas past tense rule such as \"*a* \u2192 *oo*\" as in(understand, understood) has low diversity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ". For the set of languages where we want collective results (e.g. languages within a linguistic genus), we aggregate the systems' ranks and8 We use 10,000 samples with 50% ratio, and p < 0.005.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
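The footnote above appears to describe a paired bootstrap significance test (10,000 resamples, each drawing 50% of the test items, with significance declared at p < 0.005). The sketch below shows that general procedure; the exact resampling details are assumptions rather than the organizers' script.

import random

def paired_bootstrap(correct_a, correct_b, n_samples=10_000, ratio=0.5, seed=0):
    # correct_a / correct_b: per-item 0/1 correctness of systems A and B on the same test set.
    # Returns the fraction of resamples in which A fails to beat B.
    rng = random.Random(seed)
    n = len(correct_a)
    k = max(1, int(ratio * n))
    losses = 0
    for _ in range(n_samples):
        idx = [rng.randrange(n) for _ in range(k)]
        acc_a = sum(correct_a[i] for i in idx) / k
        acc_b = sum(correct_b[i] for i in idx) / k
        if acc_a <= acc_b:
            losses += 1
    return losses / n_samples

# System A would be called significantly better than B if the returned value is below 0.005.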
|
{ |
|
"text": "Please see the results per language here:https://docs.google.com/spreadsheets/ d/1ODFRnHuwN-mvGtzXA1sNdCi-jNqZjiE-i9jRxZCK0kg/edit?usp=sharing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Accuracy should ideally be assessed at the trial level using a logistic regression as opposed to a linear regression. Bytrial accuracy was however not available at analysis time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
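As a pointer to the trial-level analysis this footnote recommends, the sketch below fits a plain logistic regression over per-item correctness with statsmodels; the column names (correct, category, log_count) and the CSV path are assumptions, and random effects are omitted for brevity.

import pandas as pd
import statsmodels.formula.api as smf

# Hypothetical table: one row per test item, with a 0/1 'correct' column.
trials = pd.read_csv("per_item_results.csv")
logit_fit = smf.logit("correct ~ C(category, Sum) * log_count", data=trials).fit()
print(logit_fit.summary())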
|
{ |
|
"text": "Minor issues with the encoding of diacritics were identified, and will be corrected for release.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": " 12 We also note that some Hiligaynon entries contained multiple lemma forms (\"bati/batian/pamatian\") for a single entry. We decided to leave it since we could not find any more information on which of the lemmas should be selected as the main. A similar issue was observed in Chichicapan Zapotec.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We would like to thank each of the participants for their time and effort in developing their task systems. We also thank Jason Eisner for organization and guidance. We thank Vitalij Chernyavskij for his help with V\u00f5ro and Umida Boltaeva and Bahriddin Abdiev for their contribution in Uzbek data annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc NYU-CUBoulder-01-0 1.0 95.7 BASE: trm-single 1.0 95.6 CULing-01-0 1.0 95.6 BASE: trm-shared 1.0 95.6 ETHZ-00-1 1.0 95.5 uiuc-01-0 1.0 94.9 deepspin-01-1 1.0 94.8 NYU-CUBoulder-02-0 1.0 94.8 NYU-CUBoulder-03-0 1.0 94.7 deepspin-02-11.0 94.5 BASE: mono-aug-shared 1.0 94.4 BASE: mono-aug-single 1.0 94.4 NYU-CUBoulder-04-0 1.0 94.3 ETHZ-02-1 14.0 93.3 BASE: mono-single 14.0 92.9 BASE: mono-shared 14.0 92.9 BASE: trm-aug-single 14.0 92.5 BASE: trm-aug-shared 14.0 92.5 flexica-02-1 14.0 91.5 IMS-00-0 14.0 90.9 LTI-00-1 21.0 89.7 flexica-03-1 21.0 89.3 cmu_tartan_01-0 23.0 85.7 cmu_tartan_01-1 23.0 85.7 cmu_tartan_02-1 23.0 85.7 cmu_tartan_00-0 23.0 85.5 cmu_tartan_00-1 23.0 85.5 CU7565-02-0 28.0 80.5 flexica-01-1 29.0 58. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc BASE: mono-aug-single 1.0 100.0 BASE: trm-aug-single 1.0 100.0 CU7565-02-0 1.0 100.0 CU7565-01-0 1.0 100.0 uiuc-01-0 1.0 100.0 NYU-CUBoulder-02-0 1.0 100.0 NYU-CUBoulder-03-0 1.0 100.0 BASE: mono-aug-shared 1.0 100.0 NYU-CUBoulder-01-0 1.0 100.0 LTI-00-1 1.0 100.0 IMS-00-0 1.0 100.0 flexica-01-1 1.0 100.0 deepspin-02-1 1.0 100.0 deepspin-01-1 1.0 100.0 CULing-01-0 1.0 100.0 cmu_tartan_01-1 1.0 100.0 NYU-CUBoulder-04-0 1.0 100.0 BASE: trm-aug-shared 1.0 100.0 flexica-03-1 1.0 93.8 ETHZ-00-1 1.0 93.8 cmu_tartan_02-1 1.0 93.8 cmu_tartan_01-0 1.0 93.8 cmu_tartan_00-0 1.0 87.5 cmu_tartan_00-1 1.0 87.5 BASE: trm-shared 1.0 87.5 BASE: trm-single 1.0 87.5 flexica-02-1 27.0 0.0 BASE: mono-shared 27.0 0. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc uiuc-01-0 1.0 97.7 IMS-00-0 1.0 97.6 CULing-01-0 1.0 96.9 NYU-CUBoulder-01-0 1.4 97.9 NYU-CUBoulder-02-01.4 97.9 NYU-CUBoulder-03-0 1.4 97.9 deepspin-02-11.4 97.6 BASE: mono-aug-single 1.4 97. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc BASE: mono-shared 1.0 100.0 BASE: mono-single 1.0 100.0 CU7565-01-0 1.0 100.0 IMS-00-0 1.0 100.0 deepspin-02-1 1.0 100.0 deepspin-01-1 1.0 100.0 flexica-03-1 1.0 99.9 BASE: trm-shared 1.0 99.9 BASE: mono-aug-single 1.0 99.9 cmu_tartan_00-0 1.0 99.9 BASE: trm-aug-shared 1.0 99.9 BASE: trm-aug-single 1.0 99.7 cmu_tartan_01-1 1.0 99.7 BASE: mono-aug-shared 1.0 99.6 NYU-CUBoulder-04-0 1.0 99.6 LTI-00-1 1.0 99.5 flexica-02-1 1.0 99.3 cmu_tartan_01-0 1.0 99.3 BASE: trm-single 1.0 98.8 NYU-CUBoulder-01-0 1.0 98.8 NYU-CUBoulder-02-0 1.0 98.8 NYU-CUBoulder-03-0 1.0 98.7 cmu_tartan_02-1 1.0 98.7 uiuc-01-0 1.0 98.5 CULing-01-0 13.0 98.0 cmu_tartan_00-1 13.0 97.7 flexica-01-1 14.5 97.4 CU7565-02-0 15.5 94.9 ETHZ-02-1 27.0 90.4 ETHZ-00-1 28.5 87.9Oracle (Baselines) 100.0 Oracle (Submissions) 100.0 Oracle (All) 100.0 ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "CULing-01-0 1.0 93.9 uiuc-01-0 1.0 93.5 BASE: trm-single 1.0 92.8 deepspin-01-1 2.5 93.1 NYU-CUBoulder-04-0 2.5 93.1 deepspin-02-1 2.5 92.6 NYU-CUBoulder-03-0 2.5 92.5 NYU-CUBoulder-02-0 6.0 92.3 BASE: mono-single 6.0 92.1 NYU-CUBoulder-01-0 6.0 92.0 BASE: mono-aug-single 6.0 91.6 BASE: trm-aug-single 6.0 91.4 IMS-00-0 10.5 91.4 BASE: mono-aug-shared 10.5 90.0 BASE: mono-shared 10.5 89.9 LTI-00-1 13.0 89.6 cmu_tartan_00-1 13.0 87.9 ETHZ-02-115 ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Rank Acc", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rank Acc deepspin-02-1 1.0 94.0 CULing-01-0 1.0 93.9 BASE: trm-single 1.0 93.9 uiuc-01-0 1.0 93.8 BASE: trm-aug-single 3.5 93.7 deepspin-01-1 3.5 93.6 cmu_tartan_02-1 6.5 93.3 cmu_tartan_00-1 6.5 93.2 cmu_tartan_01-1 6.5 93.2 cmu_tartan_01-0 6.5 93.2 cmu_tartan_00-0 6.5 93.2 BASE: mono-single 9.5 93.0 LTI-00-1 9.5 92.8 BASE: trm-shared 13.5 92.0 BASE: mono-aug-single 14.5 92.3 BASE: trm-aug-shared 15.0 91.9 IMS-00-0 17.0 91.5 NYU-CUBoulder-04-0 18.5 90.8 flexica-03-1 18.5 90.5 flexica-02-1 18.5 90.5 NYU-CUBoulder-03-0 19.5 90.2 NYU-CUBoulder-02-0 19.5 90.2 NYU-CUBoulder-01-0 23.5 89.5 BASE: mono-shared 21.5 88.9 BASE: mono-aug-shared 24.5 87.2 CU7565-02-0 25.5 85.2 flexica-01-1 27.0 82.1 ETHZ-02-1 28.0 73.7 ETHZ-00-1 28.5 67.9 *CU7565-01-0 30.0 0.0 Oracle (Baselines) 97.0 Oracle (Submissions) 97.6 Oracle (All) 98.0 (b) Results on the Mordvin genus (2 languages)", |
|
"cite_spans": [ |
|
{ |
|
"start": 827, |
|
"end": 830, |
|
"text": "(b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Turkmen Verbs: 100 Turkmen Verbs Conjugated in All Tenses", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Murat Abdulin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Murat Abdulin. 2016. Turkmen Verbs: 100 Turkmen Verbs Conjugated in All Tenses. CreateSpace Inde- pendent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Uzbek language: 100 Uzbek verbs conjugated in common tenses", |
|
"authors": [ |
|
{ |
|
"first": "Daniyar", |
|
"middle": [], |
|
"last": "Abdullaev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniyar Abdullaev. 2016. Uzbek language: 100 Uzbek verbs conjugated in common tenses. CreateSpace In- dependent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Morphological inflection generation with hard monotonic attention", |
|
"authors": [ |
|
{ |
|
"first": "Roee", |
|
"middle": [], |
|
"last": "Aharoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2004--2015", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1183" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roee Aharoni and Yoav Goldberg. 2017. Morphologi- cal inflection generation with hard monotonic atten- tion. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 2004-2015, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Pushing the limits of low-resource morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "983--995", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonios Anastasopoulos and Graham Neubig. 2019. Pushing the limits of low-resource morphological in- flection. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 983-995.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The creation of large-scale annotated corpora of minority languages using UniParser and the EANC platform", |
|
"authors": [ |
|
{ |
|
"first": "Timofey", |
|
"middle": [], |
|
"last": "Arkhangelskiy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Belyaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arseniy", |
|
"middle": [], |
|
"last": "Vydrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The COLING 2012 Organizing Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "83--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timofey Arkhangelskiy, Oleg Belyaev, and Arseniy Vydrin. 2012. The creation of large-scale annotated corpora of minority languages using UniParser and the EANC platform. In Proceedings of COLING 2012: Posters, pages 83-92, Mumbai, India. The COLING 2012 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Kyrgyz Language: 100 Kyrgyz Verbs Fully Conjugated in All Tenses", |
|
"authors": [ |
|
{ |
|
"first": "Alima", |
|
"middle": [], |
|
"last": "Aytnatova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alima Aytnatova. 2016. Kyrgyz Language: 100 Kyrgyz Verbs Fully Conjugated in All Tenses. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Changbing Yang, and Mans Hulden. 2020. Linguist vs. machine: Rapid development of finite-state morphological grammars", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Beemer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zak", |
|
"middle": [], |
|
"last": "Boston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "April", |
|
"middle": [], |
|
"last": "Bukoski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Princess", |
|
"middle": [], |
|
"last": "Dickens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Gerlach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Torin", |
|
"middle": [], |
|
"last": "Hopkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Parth Anand Jawale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akanksha", |
|
"middle": [], |
|
"last": "Koski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Malhotra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saliha", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lan", |
|
"middle": [], |
|
"last": "Murado\u011flu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tyler", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sagarika", |
|
"middle": [], |
|
"last": "Short", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shreevastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tetsumichi", |
|
"middle": [], |
|
"last": "Spaulding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beilei", |
|
"middle": [], |
|
"last": "Umada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Beemer, Zak Boston, April Bukoski, Daniel Chen, Princess Dickens, Andrew Gerlach, Torin Hopkins, Parth Anand Jawale, Chris Koski, Akanksha Malhotra, Piyush Mishra, Saliha Mu- rado\u011flu, Lan Sang, Tyler Short, Sagarika Shreev- astava, Elizabeth Spaulding, Tetsumichi Umada, Beilei Xiang, Changbing Yang, and Mans Hulden. 2020. Linguist vs. machine: Rapid development of finite-state morphological grammars. In Pro- ceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Linguistically na\u00efve!= language independent: Why NLP needs linguistic typology", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the EACL 2009 Workshop on the Interaction between Linguistics and Computational Linguistics: Virtuous, Vicious or Vacuous?", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "26--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender. 2009. Linguistically na\u00efve!= lan- guage independent: Why NLP needs linguistic typol- ogy. In Proceedings of the EACL 2009 Workshop on the Interaction between Linguistics and Compu- tational Linguistics: Virtuous, Vicious or Vacuous?, pages 26-32.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Linguistic typology in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bender", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Linguistic Typology", |
|
"volume": "20", |
|
"issue": "3", |
|
"pages": "645--660", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M. Bender. 2016. Linguistic typology in nat- ural language processing. Linguistic Typology, 20(3):645-660.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The child's learning of english morphology", |
|
"authors": [ |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Berko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1958, |
|
"venue": "Word", |
|
"volume": "14", |
|
"issue": "2-3", |
|
"pages": "150--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jean Berko. 1958. The child's learning of english mor- phology. Word, 14(2-3):150-177.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Exponence of selected inflectional formatives", |
|
"authors": [ |
|
{ |
|
"first": "Balthasar", |
|
"middle": [], |
|
"last": "Bickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "The World Atlas of Language Structures Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Balthasar Bickel and Johanna Nichols. 2013a. Ex- ponence of selected inflectional formatives. In Matthew S. Dryer and Martin Haspelmath, editors, The World Atlas of Language Structures Online.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Fusion of selected inflectional formatives", |
|
"authors": [ |
|
{ |
|
"first": "Balthasar", |
|
"middle": [], |
|
"last": "Bickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "The World Atlas of Language Structures Online. Max Planck Institute for Evolutionary Anthropology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Balthasar Bickel and Johanna Nichols. 2013b. Fusion of selected inflectional formatives. In Matthew S. Dryer and Martin Haspelmath, editors, The World At- las of Language Structures Online. Max Planck Insti- tute for Evolutionary Anthropology, Leipzig.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The World Atlas of Language Structures Online", |
|
"authors": [ |
|
{ |
|
"first": "Balthasar", |
|
"middle": [], |
|
"last": "Bickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Balthasar Bickel and Johanna Nichols. 2013c. Inflec- tional synthesis of the verb. In Matthew S. Dryer and Martin Haspelmath, editors, The World Atlas of Language Structures Online. Max Planck Institute for Evolutionary Anthropology, Leipzig.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Tone and inflection in zenzontepec chatino. Tone and inflection", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Campbell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Campbell. 2016. Tone and inflection in zenzonte- pec chatino. Tone and inflection, pages 141-162.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "University of illinois submission to the SIGMORPHON 2020 shared task 0: Typologically diverse morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Canby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidana", |
|
"middle": [], |
|
"last": "Karipbayeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Lunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahand", |
|
"middle": [], |
|
"last": "Mozaffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charlotte", |
|
"middle": [], |
|
"last": "Yoder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marc Canby, Aidana Karipbayeva, Bryan Lunt, Sa- hand Mozaffari, Charlotte Yoder, and Julia Hocken- maier. 2020. University of illinois submission to the SIGMORPHON 2020 shared task 0: Typologically diverse morphological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Computa- tional Research in Phonetics, Phonology, and Mor- phology.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Language Universals and Linguistic Typology: Syntax and Morphology", |
|
"authors": [ |
|
{ |
|
"first": "Bernard", |
|
"middle": [], |
|
"last": "Comrie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernard Comrie. 1989. Language Universals and Lin- guistic Typology: Syntax and Morphology. Univer- sity of Chicago Press.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The conllsigmorphon 2018 shared task: Universal morphological reinflection", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christo", |
|
"middle": [], |
|
"last": "Kirov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Sylak-Glassman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00e9raldine", |
|
"middle": [], |
|
"last": "Walther", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Arya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katharina", |
|
"middle": [], |
|
"last": "Mc-Carthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sebastian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Garrett", |
|
"middle": [], |
|
"last": "Mielke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miikka", |
|
"middle": [], |
|
"last": "Nicolai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silfverberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL-SIGMORPHON 2018 Shared Task: Universal Morphological Reinflection", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Cotterell, Christo Kirov, John Sylak-Glassman, G\u00e9raldine Walther, Ekaterina Vylomova, Arya D Mc- Carthy, Katharina Kann, Sebastian J Mielke, Garrett Nicolai, Miikka Silfverberg, et al. 2018. The conll- sigmorphon 2018 shared task: Universal morpho- logical reinflection. In Proceedings of the CoNLL- SIGMORPHON 2018 Shared Task: Universal Mor- phological Reinflection, pages 1-27.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "CoNLL-SIGMORPHON 2017 shared task: Universal morphological reinflection in 52 languages", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christo", |
|
"middle": [], |
|
"last": "Kirov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Sylak-Glassman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00e9raldine", |
|
"middle": [], |
|
"last": "Walther", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mans", |
|
"middle": [], |
|
"last": "Hulden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--30", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K17-2001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Cotterell, Christo Kirov, John Sylak-Glassman, G\u00e9raldine Walther, Ekaterina Vylomova, Patrick Xia, Manaal Faruqui, Sandra K\u00fcbler, David Yarowsky, Jason Eisner, and Mans Hulden. 2017. CoNLL- SIGMORPHON 2017 shared task: Universal mor- phological reinflection in 52 languages. In Pro- ceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection, pages 1-30, Vancouver. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Linguistic Poetic and Rhetoric of Eastern Chatino of San Juan Quiahije", |
|
"authors": [ |
|
{ |
|
"first": "Hilaria", |
|
"middle": [], |
|
"last": "Cruz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hilaria Cruz. 2014. Linguistic Poetic and Rhetoric of Eastern Chatino of San Juan Quiahije. Ph.D. thesis.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A resource for studying chatino verbal morphology", |
|
"authors": [ |
|
{ |
|
"first": "Hilaria", |
|
"middle": [], |
|
"last": "Cruz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Stump", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2820--2824", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hilaria Cruz, Antonios Anastasopoulos, and Gregory Stump. 2020. A resource for studying chatino ver- bal morphology. In Proceedings of The 12th Lan- guage Resources and Evaluation Conference, pages 2820-2824, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Modelling Tibetan verbal morphology", |
|
"authors": [ |
|
{ |
|
"first": "Qianji", |
|
"middle": [], |
|
"last": "Di", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the The 17th Annual Workshop of the Australasian Language Technology Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qianji Di, Ekaterina Vylomova, and Timothy Baldwin. 2019. Modelling Tibetan verbal morphology. In Proceedings of the The 17th Annual Workshop of the Australasian Language Technology Association, pages 35-40.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Position of case affixes", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Dryer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "The World Atlas of Language Structures Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew S. Dryer. 2013. Position of case affixes. In Matthew S. Dryer and Martin Haspelmath, edi- tors, The World Atlas of Language Structures Online.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "WALS Online. Max Planck Institute for Evolutionary Anthropology", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Dryer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Haspelmath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew S. Dryer and Martin Haspelmath, editors. 2013. WALS Online. Max Planck Institute for Evo- lutionary Anthropology, Leipzig.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Z\u00fcrit\u00fc\u00fctsch verstaa -Z\u00fcrit\u00fc\u00fctsch rede", |
|
"authors": [ |
|
{ |
|
"first": "Renate", |
|
"middle": [], |
|
"last": "Egli-Wildi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Renate Egli-Wildi. 2007. Z\u00fcrit\u00fc\u00fctsch verstaa - Z\u00fcrit\u00fc\u00fctsch rede. K\u00fcsnacht.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Oto-Manguean Inflectional Class Database", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Feist", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrique", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Palancar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.15126/SMG.28/1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Feist and Enrique L. Palancar. 2015. Oto- Manguean Inflectional Class Database. University of Surrey, Online.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Principal parts and morphological typology", |
|
"authors": [ |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Stump", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Morphology", |
|
"volume": "17", |
|
"issue": "1", |
|
"pages": "39--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raphael Finkel and Gregory Stump. 2007. Princi- pal parts and morphological typology. Morphology, 17(1):39-75.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "SIGMOR-PHON 2020 task 0 system description: ETH Z\u00fcrich team", |
|
"authors": [ |
|
{ |
|
"first": "Martina", |
|
"middle": [], |
|
"last": "Forster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Meister", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martina Forster and Clara Meister. 2020. SIGMOR- PHON 2020 task 0 system description: ETH Z\u00fcrich team. In Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Afroasiatic Languages", |
|
"authors": [ |
|
{ |
|
"first": "Zygmunt", |
|
"middle": [], |
|
"last": "Frajzyngier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Oxford Research Encyclopedia of Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/acrefore/9780199384655.013.15" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zygmunt Frajzyngier. 2018. Afroasiatic Languages. In Oxford Research Encyclopedia of Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Weird inflects but ok: Making sense of morphological generation errors", |
|
"authors": [ |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Gorman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arya", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miikka", |
|
"middle": [], |
|
"last": "Silfverberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Magdalena", |
|
"middle": [], |
|
"last": "Markowska", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "140--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyle Gorman, Arya D. McCarthy, Ryan Cotterell, Ekaterina Vylomova, Miikka Silfverberg, and Mag- dalena Markowska. 2019. Weird inflects but ok: Making sense of morphological generation errors. In Proceedings of the 23rd Conference on Computa- tional Natural Language Learning (CoNLL), pages 140-151.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Universals of language", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [ |
|
"Harold" |
|
], |
|
"last": "Greenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1963, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Harold Greenberg. 1963. Universals of lan- guage.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Heterogeneous ensemble combination search using genetic algorithm for class imbalanced data classification", |
|
"authors": [ |
|
{ |
|
"first": "Nasimul", |
|
"middle": [], |
|
"last": "Mohammad Nazmul Haque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Noman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Berretta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Moscato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "PloS one", |
|
"volume": "", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Nazmul Haque, Nasimul Noman, Regina Berretta, and Pablo Moscato. 2016. Heterogeneous ensemble combination search using genetic algo- rithm for class imbalanced data classification. PloS one, 11(1).", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Maori: A Linguistic Introduction", |
|
"authors": [ |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Harlow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ray Harlow. 2007. Maori: A Linguistic Introduction. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Pre-established categories don't exist: Consequences for language description and typology", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Haspelmath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Linguistic Typology", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "119--132", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Haspelmath. 2007. Pre-established categories don't exist: Consequences for language description and typology. Linguistic Typology, 11(1):119-132.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Grammar of humburi senni (songhay of hombori, mali)", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Heath. 2014. Grammar of humburi senni (song- hay of hombori, mali).", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Semi-supervised learning of morphological paradigms and lexicons", |
|
"authors": [ |
|
{ |
|
"first": "Mans", |
|
"middle": [], |
|
"last": "Hulden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Forsberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malin", |
|
"middle": [], |
|
"last": "Ahlberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "569--578", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mans Hulden, Markus Forsberg, and Malin Ahlberg. 2014. Semi-supervised learning of morphological paradigms and lexicons. In Proceedings of the 14th Conference of the European Chapter of the Associa- tion for Computational Linguistics, pages 569-578.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A Lecture on the Grammatical Construction of the Cree Language. Also Paradigms of the Cree Verb (Original work published 1875. The Society for Promoting Christian Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hunter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1923, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Hunter. 1923. A Lecture on the Grammatical Construction of the Cree Language. Also Paradigms of the Cree Verb (Original work published 1875. The Society for Promoting Christian Knowledge, Lon- don.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "102 Akan Verbs. CreateSpace Independent Publishing Platform", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Paa Kwesi Imbeah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paa Kwesi Imbeah. 2012. 102 Akan Verbs. CreateS- pace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "V\u00f5ru kirjakeele s\u00f5namuutmiss\u00fcsteem", |
|
"authors": [ |
|
{ |
|
"first": "Sulev", |
|
"middle": [], |
|
"last": "Iva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sulev Iva. 2007. V\u00f5ru kirjakeele s\u00f5namuutmiss\u00fcsteem. Ph.D. thesis.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Exploring neural architectures and techniques for typologically diverse morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Jayarao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddhanth", |
|
"middle": [], |
|
"last": "Pillay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Thombre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditi", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pratik Jayarao, Siddhanth Pillay, Pranav Thombre, and Aditi Chaudhary. 2020. Exploring neural architec- tures and techniques for typologically diverse mor- phological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Computational Re- search in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Uyghur language: 94 Uyghur verbs in common tenses", |
|
"authors": [ |
|
{ |
|
"first": "Alim", |
|
"middle": [], |
|
"last": "Kadeer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alim Kadeer. 2016. Uyghur language: 94 Uyghur verbs in common tenses. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "102 Ga Verbs. CreateSpace Independent Publishing Platform", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2012a. 102 Ga Verbs. CreateSpace Inde- pendent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "102 Swahili Verbs", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2012b. 102 Swahili Verbs. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "102 Lingala Verbs: Master the Simple Tenses of the Lingala", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2014a. 102 Lingala Verbs: Master the Sim- ple Tenses of the Lingala. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "102 Shona Verbs: Master the simple tenses of the Shona language", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2014b. 102 Shona Verbs: Master the sim- ple tenses of the Shona language. CreateSpace Inde- pendent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Modern Malagasy Verbs: Master the Simple Tenses of the Malagasy Language", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2015a. Modern Malagasy Verbs: Master the Simple Tenses of the Malagasy Language. Cre- ateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Modern Zulu Verbs: Master the simple tenses of the Zulu language", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2015b. Modern Zulu Verbs: Master the simple tenses of the Zulu language. CreateSpace In- dependent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Modern Kongo Verbs: Master the Simple Tenses of the Kongo Language", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2016. Modern Kongo Verbs: Master the Simple Tenses of the Kongo Language. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Modern Oromo Dictionary: Oromo-English, English-Oromo", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2017. Modern Oromo Dictionary: Oromo- English, English-Oromo. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Modern Chewa Verbs: Master the basic tenses of Chewa", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2019a. Modern Chewa Verbs: Master the basic tenses of Chewa. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Modern Zarma Verbs: Master the basic tenses of Zarma", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2019b. Modern Zarma Verbs: Master the basic tenses of Zarma. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Modern Sotho Verbs: Master the basic tenses of Sotho (Sotho dictionary", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kasahorow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kasahorow. 2020. Modern Sotho Verbs: Master the basic tenses of Sotho (Sotho dictionary. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "LowResourceEval-2019: a shared task on morphological analysis for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Klyachko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Sorokin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Krizhanovskaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Krizhanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Galina", |
|
"middle": [], |
|
"last": "Ryazanskaya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.11285" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Klyachko, Alexey Sorokin, Natalia Krizhanovskaya, Andrew Krizhanovsky, and Galina Ryazanskaya. 2020. LowResourceEval- 2019: a shared task on morphological analysis for low-resource languages. arXiv preprint arXiv:2001.11285.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Statistical significance tests for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Proceed- ings of the 2004 Conference on Empirical Meth- ods in Natural Language Processing, pages 388- 395, Barcelona, Spain. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "550 Dakota Verbs", |
|
"authors": [ |
|
{ |
|
"first": "Harlan", |
|
"middle": [], |
|
"last": "Lafontaine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Mckay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harlan LaFontaine and Neil McKay. 2005. 550 Dakota Verbs. Minnesota Historical Society Press, Online.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Concepticon: A resource for the linking of concept lists", |
|
"authors": [ |
|
{ |
|
"first": "Johann-Mattis", |
|
"middle": [], |
|
"last": "List", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Cysouw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Forkel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2393--2400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johann-Mattis List, Michael Cysouw, and Robert Forkel. 2016. Concepticon: A resource for the link- ing of concept lists. In Proceedings of the Tenth In- ternational Conference on Language Resources and Evaluation (LREC'16), pages 2393-2400.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Leveraging principal parts for morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mans", |
|
"middle": [], |
|
"last": "Hulden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ling Liu and Mans Hulden. 2020. Leveraging principal parts for morphological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Computa- tional Research in Phonetics, Phonology, and Mor- phology.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Murrinhpatha Morphology and Phonology", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Mansfield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "653", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Mansfield. 2019. Murrinhpatha Morphology and Phonology, volume 653. Walter de Gruyter.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "From softmax to sparsemax: A sparse model of attention and multi-label classification", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramon", |
|
"middle": [], |
|
"last": "Astudillo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1614--1623", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 Martins and Ramon Astudillo. 2016. From soft- max to sparsemax: A sparse model of attention and multi-label classification. In International Confer- ence on Machine Learning, pages 1614-1623.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Te Aka Online M\u0101ori Dictionary", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Moorfield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John C. Moorfield. 2019. Te Aka Online M\u0101ori Dictio- nary. Online.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "2020. The CMU-LTI submission to the SIGMORPHON 2020 shared task 0: Language-specific cross-lingual transfer", |
|
"authors": [ |
|
{ |
|
"first": "Nikitha", |
|
"middle": [], |
|
"last": "Murikinati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikitha Murikinati and Antonios Anastasopoulos. 2020. The CMU-LTI submission to the SIGMORPHON 2020 shared task 0: Language-specific cross-lingual transfer. In Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Kazakh Language: 101 Kazakh Verbs. Preceptor Language Guides", |
|
"authors": [ |
|
{ |
|
"first": "Temir", |
|
"middle": [], |
|
"last": "Nabiyev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Temir Nabiyev. 2015. Kazakh Language: 101 Kazakh Verbs. Preceptor Language Guides, Online.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Luganda Language: 101 Luganda Verbs", |
|
"authors": [ |
|
{ |
|
"first": "Mirembe", |
|
"middle": [], |
|
"last": "Namono", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mirembe Namono. 2018. Luganda Language: 101 Lu- ganda Verbs. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Shona Language: 101 Shona Verbs. CreateSpace Independent Publishing Platform", |
|
"authors": [ |
|
{ |
|
"first": "Idai", |
|
"middle": [], |
|
"last": "Nandoro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Idai Nandoro. 2018. Shona Language: 101 Shona Verbs. CreateSpace Independent Publishing Plat- form, Online.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Table of Tagalog Verbs", |
|
"authors": [], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Center for Southeast Asian Studies NIU. 2017. Table of Tagalog Verbs. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Tone and inflection: New facts and new perspectives", |
|
"authors": [ |
|
{ |

"first": "Enrique", |

"middle": [ |

"L" |

], |

"last": "Palancar", |

"suffix": "" |

}, |

{ |

"first": "Jean L\u00e9o", |

"middle": [], |

"last": "L\u00e9onard", |

"suffix": "" |

} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "296", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrique L Palancar and Jean L\u00e9o L\u00e9onard. 2016. Tone and inflection: New facts and new perspectives, vol- ume 296. Walter de Gruyter GmbH & Co KG.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "One-sizefits-all multilingual models", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Andr\u00e9", |

"middle": [ |

"F", |

"T" |

], |

"last": "Martins", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Peters and Andr\u00e9 F. T Martins. 2020. One-size- fits-all multilingual models. In Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "It-ist at the sigmorphon 2019 shared task: Sparse two-headed models for inflection", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Andr\u00e9", |

"middle": [ |

"F", |

"T" |

], |

"last": "Martins", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Peters and Andr\u00e9 FT Martins. 2019. It-ist at the sigmorphon 2019 shared task: Sparse two-headed models for inflection. In Proceedings of the 16th Workshop on Computational Research in Phonetics, Phonology, and Morphology, pages 50-56.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Sparse sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vlad", |
|
"middle": [], |
|
"last": "Niculae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9 Ft", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1504--1519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Peters, Vlad Niculae, and Andr\u00e9 FT Martins. 2019. Sparse sequence-to-sequence models. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1504-1519.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Synchronic and diachronic perspective on'word'in siouan. Word: a crosslinguistic typology", |
|
"authors": [ |
|
{ |

"first": "Robert", |

"middle": [ |

"L" |

], |

"last": "Rankin", |

"suffix": "" |

}, |

{ |

"first": "John", |

"middle": [], |

"last": "Boyle", |

"suffix": "" |

}, |

{ |

"first": "Randolph", |

"middle": [], |

"last": "Graczyk", |

"suffix": "" |

}, |

{ |

"first": "John", |

"middle": [ |

"E" |

], |

"last": "Koontz", |

"suffix": "" |

} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--204", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert L Rankin, John Boyle, Randolph Graczyk, and John E Koontz. 2003. Synchronic and diachronic perspective on'word'in siouan. Word: a cross- linguistic typology, pages 180-204.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Cebuano Language: 101 Cebuano Verbs", |
|
"authors": [ |
|
{ |
|
"first": "Dakila", |
|
"middle": [], |
|
"last": "Reyes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dakila Reyes. 2015. Cebuano Language: 101 Ce- buano Verbs. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Hiligaynon Language. 101 Hiligaynon Verbs", |
|
"authors": [ |
|
{ |
|
"first": "Anj", |
|
"middle": [], |
|
"last": "Santos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anj Santos. 2018. Hiligaynon Language. 101 Hili- gaynon Verbs. CreateSpace Independent Publishing Platform, Online.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "The UniMelb submission to the SIGMORPHON 2020 shared task 0: Typologically diverse morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Andei", |
|
"middle": [], |
|
"last": "Scherbakov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andei Scherbakov. 2020. The UniMelb submission to the SIGMORPHON 2020 shared task 0: Typologi- cally diverse morphological inflection. In Proceed- ings of the 17th SIGMORPHON Workshop on Com- putational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Phonotactic modeling of extremely low resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Shcherbakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Vylomova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Thieberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Australasian Language Technology Association Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrei Shcherbakov, Ekaterina Vylomova, and Nick Thieberger. 2016. Phonotactic modeling of ex- tremely low resource languages. In Proceedings of the Australasian Language Technology Association Workshop 2016, pages 84-93.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Data augmentation for morphological reinflection", |
|
"authors": [ |
|
{ |
|
"first": "Miikka", |
|
"middle": [], |
|
"last": "Silfverberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Wiemerslage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingshuang Jack", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Universal Morphological Reinflection", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--99", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K17-2010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miikka Silfverberg, Adam Wiemerslage, Ling Liu, and Lingshuang Jack Mao. 2017. Data augmentation for morphological reinflection. In Proceedings of the CoNLL SIGMORPHON 2017 Shared Task: Univer- sal Morphological Reinflection, pages 90-99, Van- couver. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "2020. The NYU-CUBoulder systems for SIGMORPHON 2020 task 0 and task 2", |
|
"authors": [ |
|
{ |
|
"first": "Assaf", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katharina", |
|
"middle": [], |
|
"last": "Kann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 17th SIG-MORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Assaf Singer and Katharina Kann. 2020. The NYU- CUBoulder systems for SIGMORPHON 2020 task 0 and task 2. In Proceedings of the 17th SIG- MORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Salish internal relationships", |
|
"authors": [ |
|
{ |
|
"first": "Morris", |
|
"middle": [], |
|
"last": "Swadesh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1950, |
|
"venue": "International Journal of American Linguistics", |
|
"volume": "16", |
|
"issue": "4", |
|
"pages": "157--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Morris Swadesh. 1950. Salish internal relation- ships. International Journal of American Linguis- tics, 16(4):157-167.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "The composition and use of the universal morphological feature schema (unimorph schema)", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Sylak-Glassman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Sylak-Glassman. 2016. The composition and use of the universal morphological feature schema (uni- morph schema). Johns Hopkins University.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "The Kazakh Verbs: Review Guide. Preceptor Language Guides", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turkicum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Turkicum. 2019a. The Kazakh Verbs: Review Guide. Preceptor Language Guides, Online.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "The Uzbek Verbs: Review Guide", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turkicum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Turkicum. 2019b. The Uzbek Verbs: Review Guide. CreateSpace Independent Publishing Platform, On- line.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "501 Turkmen verbs. Peace Corps", |
|
"authors": [ |
|
{ |

"first": "", |

"middle": [], |

"last": "Turkmenistan US Embassy", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Turkmenistan US Embassy. 2018. 501 Turkmen verbs. Peace Corps, Online.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "Exact hard monotonic attention for character-level transduction", |
|
"authors": [ |
|
{ |
|
"first": "Shijie", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1530--1537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shijie Wu and Ryan Cotterell. 2019. Exact hard mono- tonic attention for character-level transduction. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1530- 1537.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "Applying the transformer to character-level transduction", |
|
"authors": [ |
|
{ |
|
"first": "Shijie", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mans", |
|
"middle": [], |
|
"last": "Hulden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shijie Wu, Ryan Cotterell, and Mans Hulden. 2020. Ap- plying the transformer to character-level transduc- tion.", |
|
"links": null |
|
}, |
|
"BIBREF84": { |
|
"ref_id": "b84", |
|
"title": "Ensemble self-training for low-resource languages:grapheme-to-phoneme conversion and morphological inflection", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Thang" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Kuhns", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Yu, Ngoc Thang Vu, and Jonas Kuhns. 2020. Ensemble self-training for low-resource languages:grapheme-to-phoneme conversion and morphological inflection. In Proceedings of the 17th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphol- ogy.", |
|
"links": null |
|
}, |
|
"BIBREF85": { |
|
"ref_id": "b85", |
|
"title": "Open corpus of Veps and Karelian languages (VepKar): preliminary data collection and dictionaries", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Zaytseva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Krizhanovsky", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Natalia", |

"middle": [], |

"last": "Krizhanovsky", |

"suffix": "" |

}, |

{ |

"first": "Natalia", |

"middle": [], |

"last": "Pellinen", |

"suffix": "" |

}, |

{ |

"first": "Aleksandra", |

"middle": [], |

"last": "Rodionova", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Corpus Linguistics-2017", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nina Zaytseva, Andrew Krizhanovsky, Natalia Krizhanovsky, Natalia Pellinen, and Aleksndra Ro- dionova. 2017. Open corpus of Veps and Karelian languages (VepKar): preliminary data collection and dictionaries. In Corpus Linguistics-2017, pages 172-177.", |
|
"links": null |
|
}, |
|
"BIBREF86": { |
|
"ref_id": "b86", |
|
"title": "A Tohono O'odham grammar", |
|
"authors": [ |
|
{ |
|
"first": "Ofelia", |
|
"middle": [], |
|
"last": "Zepeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ofelia Zepeda. 2003. A Tohono O'odham grammar (Original work published 1983). University of Ari- zona Press, Online.", |
|
"links": null |
|
}, |
|
"BIBREF87": { |
|
"ref_id": "b87", |
|
"title": "Synchronous bidirectional neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "91--105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Zhou, Jiajun Zhang, and Chengqing Zong. 2019. Synchronous bidirectional neural machine transla- tion. Transactions of the Association for Computa- tional Linguistics, 7:91-105.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Languages in our sample colored by family.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Accuracy by language averaged across all the final submitted systems with their standard deviations. Language families are demarcated by color, with accuracy on development languages (top), and generalization languages (bottom).", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "ben crh dan deu est evn gmh gml isl izh kan kjh kpv krl liv mdf mhr mlt myv nno nob olo ood pus san sme swe syc tel udm urd vep vot vro Difficulty of Nouns: Percentage of test samples falling into each category. The total number of test samples for each language is outlined on the top of the plot.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"text": "Difficulty of Verbs: Percentage of test samples falling into each category. The total number of test samples for each language is outlined on the top of the plot.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"text": "Difficulty of Adjectives: Percentage of test samples falling into each category. The total number of test samples for each language is outlined on the top of the plot.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"text": "This family was represented by one language, Dakota. Mean accuracy across systems was", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"text": "Accuracy for each system and language by the log size of the dataset. Points are color-coded according to language type: development language -development family, surprise language -development family, surprise language -surprise family.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF9": { |
|
"uris": null, |
|
"text": "Accuracy for each system and language by the log size of the dataset, grouped by language family. Points are color-coded according to language family, and shape-coded according to language type: development language -development family, surprise language -development family, surprise language -surprise family. -01-0 NYU-CUBoulder-02-0 NYU-CUBoulder-03-0", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF10": { |
|
"uris": null, |
|
"text": "Accuracy for each language by the log size of the dataset, grouped by submitted system. Points are color-and shape-coded according to language type: development language -development family, surprise language -development family, surprise language -surprise family.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF11": { |
|
"uris": null, |
|
"text": "on the Uto-Aztecan family (1 language)", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Development languages used in the shared task.", |
|
"num": null, |
|
"content": "<table><tr><td>3.6 Niger-Congo</td><td>characterized by a large amount of grammatical</td></tr><tr><td>Our language sample includes two genera from the Niger-Congo family, namely Bantoid and Kwa languages. These have mostly exclusively con-catenative fusion, and single exponence in verbal tense-aspect-mood. The inflectional synthesis of verbs is moderately high, e.g. with 4-5 classes per word in Swahili and Zulu. The locus of marking is inconsistent (it falls on both heads and depen-dents), and most languages are are predominantly prefixing. Full and partial reduplication is attested in most languages. Verbal person-number markers tend to be syncretic. As for nominal classes, Bantoid languages are</td><td>genders (often more than 5) assigned based on both semantic and formal rules, whereas some Akan lan-guages (like Ewe) lack a gender system. Plural tends to be always expressed by affixes or other morphological means. Case marking is generally absent or minimal. As for verbal classes, aspect is grammaticalized in Akhan (Kwa) and Zulu (Ban-toid), but not in Luganda and Swahili (Bantoid). Both past and future tenses are inflectional in Ban-toid languages. 2-3 degrees of remoteness can be distinguished in Zulu and Luganda, but not in Swahili. On the other hand, Akan (Kwa) has no opposition between past and non-past. There are</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"text": "Surprise languages used in the shared task.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"text": "Illustration of our ranking method, over the four Zapotecan languages. Note: The final ranking is based on the actual counts (#1,#2, etc), not on the system's average rank.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"text": "Aggregate results on all languages. Bolded results are the ones which beat the best baseline.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"text": "Number of samples in training, development, test sets, as well as statistics on systematic errors (inconsistency) and percentage of samples with lemmata observed in the training set.", |
|
"num": null, |
|
"content": "<table><tr><td>Lang</td><td/><td>Total</td><td/><td colspan=\"3\">Inconsistency (%)</td><td colspan=\"4\">Contradiction (%) In Vocabulary (%)</td></tr><tr><td/><td>Train</td><td>Dev</td><td colspan=\"4\">Test Train Dev Test</td><td>Dev</td><td>Test</td><td>Dev</td><td>Test</td></tr><tr><td>lld</td><td>5073</td><td>725</td><td>1450</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>24.3</td><td>12.3</td></tr><tr><td>lud</td><td>294</td><td>41</td><td>82</td><td>7.8</td><td>0.0</td><td>3.7</td><td>9.8</td><td>11.0</td><td>31.7</td><td>20.7</td></tr><tr><td>lug</td><td>3420</td><td>489</td><td>977</td><td>4.0</td><td>0.6</td><td>0.8</td><td>5.1</td><td>7.6</td><td>18.2</td><td>9.1</td></tr><tr><td>mao</td><td>145</td><td>21</td><td>42</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>61.9</td><td>81.0</td></tr><tr><td>mdf</td><td>46362</td><td>6633</td><td>13255</td><td>1.6</td><td>0.2</td><td>0.5</td><td>3.1</td><td>3.3</td><td>49.0</td><td>35.1</td></tr><tr><td>mhr</td><td>71143</td><td>10081</td><td>20233</td><td>0.3</td><td>0.0</td><td>0.0</td><td>0.4</td><td>0.5</td><td>48.8</td><td>34.3</td></tr><tr><td>mlg</td><td>447</td><td>62</td><td>127</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>90.3</td><td>74.0</td></tr><tr><td>mlt</td><td>1233</td><td>176</td><td>353</td><td>0.1</td><td>0.0</td><td>0.0</td><td>0.6</td><td>0.0</td><td>52.3</td><td>30.6</td></tr><tr><td>mwf</td><td>777</td><td>111</td><td>222</td><td>2.6</td><td>0.0</td><td>0.9</td><td>2.7</td><td>4.5</td><td>25.2</td><td>13.1</td></tr><tr><td>myv</td><td>74928</td><td>10738</td><td>21498</td><td>1.7</td><td>0.3</td><td>0.5</td><td>3.1</td><td>3.1</td><td>45.5</td><td>32.7</td></tr><tr><td>nld</td><td>38826</td><td>5547</td><td>11094</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>58.2</td><td>38.4</td></tr><tr><td>nno</td><td>10101</td><td>1443</td><td>2887</td><td>3.4</td><td>0.4</td><td>1.0</td><td>6.0</td><td>6.8</td><td>80.0</td><td>70.2</td></tr><tr><td>nob</td><td>13263</td><td>1929</td><td>3830</td><td>10.5</td><td>1.8</td><td>3.1</td><td>18.5</td><td>19.7</td><td>80.5</td><td>70.5</td></tr><tr><td>nya</td><td>3031</td><td>429</td><td>853</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>46.4</td><td>26.5</td></tr><tr><td>olo</td><td>43936</td><td>6260</td><td>12515</td><td>1.4</td><td>0.3</td><td>0.5</td><td>3.3</td><td>2.9</td><td>83.0</td><td>70.8</td></tr><tr><td>ood</td><td>1123</td><td>160</td><td>314</td><td>0.4</td><td>0.0</td><td>0.0</td><td>1.9</td><td>1.0</td><td>70.0</td><td>58.0</td></tr><tr><td>orm</td><td>1424</td><td>203</td><td>405</td><td>0.2</td><td>0.0</td><td>0.2</td><td>0.5</td><td>0.7</td><td>41.9</td><td>22.7</td></tr><tr><td>ote</td><td>22962</td><td>3231</td><td>6437</td><td>0.4</td><td>0.1</td><td>0.1</td><td>0.5</td><td>0.8</td><td>48.4</td><td>29.5</td></tr><tr><td>otm</td><td>21533</td><td>3020</td><td>5997</td><td>0.9</td><td>0.1</td><td>0.3</td><td>1.8</td><td>1.7</td><td>49.4</td><td>29.4</td></tr><tr><td>pei</td><td>10017</td><td>1349</td><td>2636</td><td>15.8</td><td>2.6</td><td>4.9</td><td>21.5</td><td>21.4</td><td>9.1</td><td>4.7</td></tr><tr><td>pus</td><td>4861</td><td>695</td><td>1389</td><td>3.9</td><td>0.6</td><td>1.6</td><td>9.9</td><td>7.7</td><td>34.2</td><td>23.0</td></tr><tr><td>san</td><td>22968</td><td>3188</td><td>6272</td><td>3.1</td><td>0.5</td><td>0.9</td><td>4.5</td><td>5.5</td><td>26.9</td><td>14.6</td></tr><tr><td>sme</td><td>43877</td><td>6273</td><td>12527</t
d><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>28.2</td><td>16.3</td></tr><tr><td>sna</td><td>1897</td><td>246</td><td>456</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>31.3</td><td>18.0</td></tr><tr><td>sot</td><td>345</td><td>50</td><td>99</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>48.0</td><td>25.3</td></tr><tr><td>swa</td><td>3374</td><td>469</td><td>910</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>20.7</td><td>10.5</td></tr><tr><td>swe</td><td>54888</td><td>7840</td><td>15683</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>70.6</td><td>51.9</td></tr><tr><td>syc</td><td>1917</td><td>275</td><td>548</td><td>3.5</td><td>1.5</td><td>0.4</td><td>7.6</td><td>8.6</td><td>47.3</td><td>28.1</td></tr><tr><td>tel</td><td>952</td><td>136</td><td>273</td><td>1.4</td><td>0.0</td><td>1.1</td><td>0.7</td><td>2.6</td><td>62.5</td><td>39.6</td></tr><tr><td>tgk</td><td>53</td><td>8</td><td>16</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>tgl</td><td>1870</td><td>236</td><td>478</td><td>7.6</td><td>1.3</td><td>1.0</td><td>11.9</td><td>10.0</td><td>74.2</td><td>55.6</td></tr><tr><td>tuk</td><td>20963</td><td>2992</td><td>5979</td><td>9.5</td><td>1.5</td><td>3.2</td><td>16.8</td><td>16.0</td><td>16.7</td><td>8.3</td></tr><tr><td>udm</td><td>88774</td><td>12665</td><td>25333</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>38.1</td><td>24.8</td></tr><tr><td>uig</td><td>5372</td><td>750</td><td>1476</td><td>0.3</td><td>0.0</td><td>0.0</td><td>0.3</td><td>0.5</td><td>12.0</td><td>6.1</td></tr><tr><td>urd</td><td>8486</td><td>1213</td><td>2425</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>9.4</td><td>6.0</td></tr><tr><td>uzb</td><td>25199</td><td>3596</td><td>7191</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>11.9</td><td>6.0</td></tr><tr><td>vec</td><td>12203</td><td>1743</td><td>3487</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>20.8</td><td>10.6</td></tr><tr><td>vep</td><td>94395</td><td>13320</td><td>26422</td><td>10.9</td><td>1.8</td><td>3.3</td><td>19.3</td><td>19.8</td><td>25.1</td><td>12.9</td></tr><tr><td>vot</td><td>1003</td><td>146</td><td>281</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>35.6</td><td>19.6</td></tr><tr><td>vro</td><td>357</td><td>51</td><td>103</td><td>1.1</td><td>0.0</td><td>0.0</td><td>2.0</td><td>1.0</td><td>70.6</td><td>50.5</td></tr><tr><td>xno</td><td>178</td><td>26</td><td>51</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>19.2</td><td>9.8</td></tr><tr><td>xty</td><td>2110</td><td>299</td><td>600</td><td>0.1</td><td>0.3</td><td>0.0</td><td>0.3</td><td>1.3</td><td>78.6</td><td>65.8</td></tr><tr><td>zpv</td><td>805</td><td>113</td><td>228</td><td>0.0</td><td>0.0</td><td>0.4</td><td>2.7</td><td>0.9</td><td>78.8</td><td>78.9</td></tr><tr><td>zul</td><td>322</td><td>42</td><td>78</td><td>1.9</td><td>0.0</td><td>0.0</td><td>2.4</td><td>0.0</td><td>83.3</td><td>66.7</td></tr><tr><td colspan=\"4\">TOTAL 1574004 223649 446580</td><td>2.0</td><td>0.3</td><td>0.6</td><td>3.6</td><td>3.6</td><td>41.1</td><td>27.9</td></tr><tr><td colspan=\"11\">Table 7: Number of samples in training, development, test sets, as well as statistics on systematic errors (inconsis-</td></tr><tr><td colspan=\"8\">tency) and percentage of samples with lemmata observed in the training set.</td><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF11": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Rank Acc</td><td>System</td><td colspan=\"2\">Rank Acc</td></tr><tr><td>uiuc-01-0</td><td>1.0</td><td>96.4</td><td>CULing-01-0</td><td>1.0</td><td>73.0</td></tr><tr><td>CULing-01-0</td><td>1.0</td><td>96.3</td><td>flexica-03-1</td><td>1.0</td><td>70.4</td></tr><tr><td>deepspin-02-1</td><td colspan=\"2\">3.7 94.4</td><td>IMS-00-0 uiuc-01-0 ETHZ-02-1 cmu_tartan_02-1 flexica-02-1 cmu_tartan_00-1</td><td>1.0 1.0 1.0 1.0 1.0 8.0</td><td>70.3 70.3 69.4 69.4 69.4 69.2</td></tr><tr><td>NYU-CUBoulder-04-0</td><td>9.7</td><td>94.3</td><td>BASE: mono-aug-shared</td><td>8.0</td><td>68.5</td></tr><tr><td>BASE: mono-single</td><td>6.3</td><td>92.8</td><td>BASE: mono-aug-single</td><td>8.0</td><td>68.5</td></tr><tr><td>cmu_tartan_00-0</td><td>6.3</td><td>92.7</td><td>ETHZ-00-1</td><td>8.0</td><td>68.4</td></tr><tr><td>cmu_tartan_01-0</td><td>9.3</td><td>89.6</td><td>BASE: trm-aug-shared</td><td>8.0</td><td>68.0</td></tr><tr><td>cmu_tartan_01-1</td><td>9.3</td><td>89.4</td><td>BASE: trm-aug-single</td><td>8.0</td><td>68.0</td></tr><tr><td>cmu_tartan_02-1</td><td colspan=\"2\">10.0 80.9</td><td>cmu_tartan_01-1</td><td>8.0</td><td>68.0</td></tr><tr><td>ETHZ-00-1</td><td>6.7</td><td>94.7</td><td>NYU-CUBoulder-01-0</td><td>8.0</td><td>67.9</td></tr><tr><td>BASE: trm-shared</td><td>6.7</td><td>94.2</td><td>BASE: trm-shared</td><td>8.0</td><td>67.7</td></tr><tr><td>BASE: trm-aug-shared</td><td>6.7</td><td>94.0</td><td>BASE: trm-single</td><td>8.0</td><td>67.7</td></tr><tr><td>IMS-00-0</td><td>6.7</td><td>93.6</td><td>cmu_tartan_00-0</td><td>8.0</td><td>67.6</td></tr><tr><td>BASE: mono-aug-single</td><td>6.7</td><td>93.5</td><td>cmu_tartan_01-0</td><td>8.0</td><td>67.6</td></tr><tr><td>NYU-CUBoulder-03-0</td><td colspan=\"2\">12.3 93.7</td><td>BASE: mono-shared</td><td>8.0</td><td>66.8</td></tr><tr><td>flexica-02-1</td><td>9.3</td><td>92.9</td><td>BASE: mono-single</td><td>8.0</td><td>66.8</td></tr><tr><td>ETHZ-02-1</td><td>9.3</td><td>92.3</td><td>NYU-CUBoulder-02-0</td><td>8.0</td><td>66.5</td></tr><tr><td>flexica-03-1</td><td>9.3</td><td>92.1</td><td>deepspin-02-1</td><td>8.0</td><td>66.5</td></tr><tr><td>BASE: mono-shared</td><td>9.3</td><td>91.5</td><td>deepspin-01-1</td><td colspan=\"2\">24.0 65.1</td></tr><tr><td>*CU7565-01-0</td><td colspan=\"2\">19.3 93.7</td><td>NYU-CUBoulder-03-0</td><td colspan=\"2\">24.0 64.7</td></tr><tr><td colspan=\"3\">BASE: mono-aug-shared 16.0 89.8</td><td>NYU-CUBoulder-04-0</td><td colspan=\"2\">26.0 61.8</td></tr><tr><td>CU7565-02-0</td><td colspan=\"2\">15.0 91.6</td><td>CU7565-02-0</td><td colspan=\"2\">27.0 55.5</td></tr><tr><td>cmu_tartan_00-1</td><td colspan=\"2\">17.7 91.7</td><td>LTI-00-1</td><td colspan=\"2\">28.0 44.9</td></tr><tr><td>LTI-00-1</td><td colspan=\"2\">17.7 91.3</td><td>flexica-01-1</td><td colspan=\"2\">28.0 41.5</td></tr><tr><td>flexica-01-1</td><td colspan=\"2\">28.3 73.4</td><td>*CU7565-01-0</td><td>30.0</td><td>0.0</td></tr><tr><td>Oracle (Baselines)</td><td/><td>98.7</td><td>Oracle (Baselines)</td><td/><td>86.9</td></tr><tr><td>Oracle (Submissions)</td><td/><td>99.7</td><td>Oracle (Submissions)</td><td/><td>98.7</td></tr><tr><td>Oracle (All)</td><td/><td>99.8</td><td>Oracle (All)</td><td/><td>98.8</td></tr><tr><td colspan=\"3\">(a) Results on the Afro-Asiatic family (3 languages)</td><td colspan=\"3\">(b) Results on the Algic family (1 language)</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF12": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Rank Acc</td><td>System</td><td colspan=\"2\">Rank Acc</td></tr><tr><td>CULing-01-0</td><td>1.0</td><td>84.4</td><td>IMS-00-0</td><td>1.0</td><td>87.6</td></tr><tr><td>IMS-00-0</td><td>1.6</td><td>85.1</td><td>CULing-01-0</td><td>1.0</td><td>87.0</td></tr><tr><td>NYU-CUBoulder-03-0</td><td>1.6</td><td>83.6</td><td>BASE: trm-aug-shared</td><td>1.0</td><td>86.8</td></tr><tr><td>ETHZ-00-1</td><td>1.6</td><td>83.4</td><td>cmu_tartan_00-0</td><td>1.0</td><td>86.3</td></tr><tr><td>NYU-CUBoulder-01-0</td><td>1.6</td><td>82.9</td><td>cmu_tartan_01-1</td><td>1.0</td><td>86.3</td></tr><tr><td>NYU-CUBoulder-04-0</td><td>1.6</td><td>82.9</td><td>BASE: trm-aug-single</td><td>1.0</td><td>85.9</td></tr><tr><td>BASE: trm-shared</td><td>1.6</td><td>82.8</td><td>BASE: trm-shared</td><td>1.0</td><td>85.8</td></tr><tr><td>NYU-CUBoulder-02-0</td><td>1.6</td><td>82.7</td><td>ETHZ-02-1</td><td>1.0</td><td>85.5</td></tr><tr><td>deepspin-02-1</td><td>3.2</td><td>82.4</td><td>cmu_tartan_01-0</td><td>5.0</td><td>85.7</td></tr><tr><td>BASE: trm-aug-single</td><td>3.2</td><td>81.6</td><td>deepspin-02-1</td><td>5.0</td><td>85.6</td></tr><tr><td>*CU7565-01-0</td><td>6.8</td><td>82.7</td><td>cmu_tartan_02-1</td><td>5.0</td><td>85.5</td></tr><tr><td>uiuc-01-0</td><td>5.4</td><td>82.3</td><td>BASE: trm-single</td><td>5.0</td><td>85.4</td></tr><tr><td>BASE: trm-single</td><td>6.0</td><td>81.2</td><td>uiuc-01-0</td><td>5.0</td><td>85.3</td></tr><tr><td>BASE: mono-aug-shared</td><td>6.0</td><td>82.9</td><td>deepspin-01-1</td><td>5.0</td><td>85.2</td></tr><tr><td>LTI-00-1</td><td>6.0</td><td>82.0</td><td>LTI-00-1</td><td>5.0</td><td>85.0</td></tr><tr><td>BASE: mono-aug-single</td><td>7.8</td><td>81.3</td><td>ETHZ-00-1</td><td>5.0</td><td>84.9</td></tr><tr><td>deepspin-01-1</td><td>7.6</td><td>81.0</td><td>BASE: mono-single</td><td>5.0</td><td>84.8</td></tr><tr><td>BASE: trm-aug-shared</td><td>7.6</td><td>79.8</td><td>BASE: mono-aug-single</td><td>5.0</td><td>84.1</td></tr><tr><td>flexica-03-1</td><td>7.6</td><td>79.3</td><td>NYU-CUBoulder-02-0</td><td colspan=\"2\">12.0 82.2</td></tr><tr><td>cmu_tartan_00-0</td><td>8.2</td><td>79.1</td><td>NYU-CUBoulder-01-0</td><td colspan=\"2\">12.0 82.2</td></tr><tr><td>BASE: mono-shared</td><td colspan=\"2\">10.4 79.2</td><td>NYU-CUBoulder-03-0</td><td colspan=\"2\">12.0 82.1</td></tr><tr><td>BASE: mono-single</td><td colspan=\"2\">10.4 77.6</td><td>NYU-CUBoulder-04-0</td><td colspan=\"2\">12.0 81.9</td></tr><tr><td>cmu_tartan_00-1</td><td colspan=\"2\">12.8 80.3</td><td>CU7565-02-0</td><td colspan=\"2\">14.5 81.4</td></tr><tr><td>cmu_tartan_02-1</td><td colspan=\"2\">12.8 78.9</td><td>flexica-02-1</td><td colspan=\"2\">16.5 83.7</td></tr><tr><td>cmu_tartan_01-0</td><td colspan=\"2\">12.8 78.6</td><td>BASE: mono-shared</td><td colspan=\"2\">16.5 83.7</td></tr><tr><td>flexica-02-1</td><td colspan=\"2\">12.8 78.3</td><td>flexica-03-1</td><td colspan=\"2\">16.5 83.0</td></tr><tr><td>cmu_tartan_01-1</td><td colspan=\"2\">12.8 78.2</td><td>cmu_tartan_00-1</td><td colspan=\"2\">19.0 62.6</td></tr><tr><td>ETHZ-02-1</td><td colspan=\"2\">12.0 77.4</td><td colspan=\"3\">BASE: mono-aug-shared 23.5 79.7</td></tr><tr><td>*CU7565-02-0</td><td colspan=\"2\">22.4 73.7</td><td>flexica-01-1</td><td colspan=\"2\">28.5 56.9</td></tr><tr><td>flexica-01-1</td><td colspan=\"2\">21.2 69.7</td><td>*CU7565-01-0</td><td>30.0</td><td>0.0</td></tr><tr><td>Oracle (Baselines)</td><td/><td>89.1</td><td>Oracle (Baselines)</td><td/><td>95.9</td></tr><tr><td>Oracle 
(Submissions)</td><td/><td>93.5</td><td>Oracle (Submissions)</td><td/><td>98.2</td></tr><tr><td>Oracle (All)</td><td/><td>93.7</td><td>Oracle (All)</td><td/><td>98.6</td></tr><tr><td colspan=\"3\">(a) Results on the Austronesian family (5 languages)</td><td colspan=\"3\">(b) Results on the Dravidian family (2 languages)</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF13": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF15": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Rank Acc</td><td>System</td><td colspan=\"2\">Rank Acc</td></tr><tr><td>uiuc-01-0</td><td>1.0</td><td>87.5</td><td>deepspin-01-1</td><td>1.0</td><td>85.1</td></tr><tr><td>BASE: trm-single</td><td>2.0</td><td>86.2</td><td>deepspin-02-1</td><td>1.0</td><td>85.0</td></tr><tr><td>CULing-01-0</td><td>3.1</td><td>86.7</td><td>LTI-00-1</td><td>1.0</td><td>84.7</td></tr><tr><td>deepspin-02-1</td><td>3.4</td><td>85.4</td><td>uiuc-01-0</td><td>1.0</td><td>84.4</td></tr><tr><td>deepspin-01-1</td><td>3.4</td><td>85.3</td><td>BASE: trm-single</td><td>1.0</td><td>84.4</td></tr><tr><td>NYU-CUBoulder-04-0</td><td>6.4</td><td>84.2</td><td>BASE: trm-shared</td><td>1.0</td><td>84.4</td></tr><tr><td>BASE: mono-single</td><td>7.9</td><td>82.4</td><td>CULing-01-0</td><td>1.0</td><td>84.1</td></tr><tr><td>NYU-CUBoulder-03-0</td><td>8.4</td><td>83.5</td><td>ETHZ-02-1</td><td>1.0</td><td>83.8</td></tr><tr><td>BASE: mono-aug-single</td><td>6.1</td><td>83.5</td><td>flexica-02-1</td><td>1.0</td><td>83.7</td></tr><tr><td>BASE: mono-shared</td><td>8.2</td><td>82.9</td><td>cmu_tartan_01-1</td><td>1.0</td><td>83.4</td></tr><tr><td>NYU-CUBoulder-02-0</td><td>9.1</td><td>83.5</td><td>BASE: mono-aug-shared</td><td>1.0</td><td>83.4</td></tr><tr><td>IMS-00-0</td><td colspan=\"2\">10.3 83.3</td><td>BASE: mono-aug-single</td><td>1.0</td><td>83.4</td></tr><tr><td>LTI-00-1</td><td>9.4</td><td>82.4</td><td>NYU-CUBoulder-01-0</td><td>1.0</td><td>83.4</td></tr><tr><td>NYU-CUBoulder-01-0</td><td>9.4</td><td>83.6</td><td>IMS-00-0</td><td>1.0</td><td>83.3</td></tr><tr><td>BASE: mono-aug-shared</td><td>9.8</td><td>82.0</td><td>BASE: trm-aug-single</td><td>1.0</td><td>83.3</td></tr><tr><td>cmu_tartan_00-0</td><td colspan=\"2\">13.9 78.5</td><td>BASE: trm-aug-shared</td><td>1.0</td><td>83.3</td></tr><tr><td>cmu_tartan_01-1</td><td colspan=\"2\">14.9 78.5</td><td>BASE: mono-shared</td><td>1.0</td><td>83.2</td></tr><tr><td>cmu_tartan_02-1</td><td colspan=\"2\">15.2 78.2</td><td>BASE: mono-single</td><td>1.0</td><td>83.2</td></tr><tr><td>BASE: trm-shared</td><td colspan=\"2\">14.5 80.2</td><td>cmu_tartan_00-0</td><td>1.0</td><td>83.1</td></tr><tr><td>BASE: trm-aug-shared</td><td colspan=\"2\">20.3 73.8</td><td>cmu_tartan_02-1</td><td>1.0</td><td>83.1</td></tr><tr><td>flexica-01-1</td><td colspan=\"2\">26.3 47.2</td><td>cmu_tartan_00-1</td><td>1.0</td><td>83.0</td></tr><tr><td>BASE: trm-aug-single</td><td>7.4</td><td>84.3</td><td>NYU-CUBoulder-03-0</td><td colspan=\"2\">22.0 82.8</td></tr><tr><td>cmu_tartan_00-1</td><td colspan=\"2\">14.1 79.0</td><td>ETHZ-00-1</td><td colspan=\"2\">22.0 82.8</td></tr><tr><td>ETHZ-02-1</td><td colspan=\"2\">14.0 81.4</td><td>cmu_tartan_01-0</td><td colspan=\"2\">22.0 82.7</td></tr><tr><td>CU7565-02-0</td><td colspan=\"2\">20.9 75.1</td><td>NYU-CUBoulder-02-0</td><td colspan=\"2\">22.0 82.6</td></tr><tr><td>cmu_tartan_01-0</td><td colspan=\"2\">18.3 76.5</td><td>flexica-03-1</td><td colspan=\"2\">22.0 82.5</td></tr><tr><td>*CU7565-01-0</td><td colspan=\"2\">27.8 81.0</td><td>NYU-CUBoulder-04-0</td><td colspan=\"2\">22.0 81.7</td></tr><tr><td>ETHZ-00-1</td><td colspan=\"2\">25.4 70.5</td><td>flexica-01-1</td><td colspan=\"2\">28.0 70.6</td></tr><tr><td>flexica-02-1</td><td colspan=\"2\">25.6 67.0</td><td>CU7565-02-0</td><td colspan=\"2\">28.0 67.9</td></tr><tr><td>flexica-03-1</td><td colspan=\"2\">26.1 64.2</td><td>*CU7565-01-0</td><td>30.0</td><td>0.0</td></tr><tr><td>Oracle (Baselines)</td><td/><td>94.1</td><td>Oracle 
(Baselines)</td><td/><td>91.3</td></tr><tr><td>Oracle (Submissions)</td><td/><td>96.2</td><td>Oracle (Submissions)</td><td/><td>96.0</td></tr><tr><td>Oracle (All)</td><td/><td>96.7</td><td>Oracle (All)</td><td/><td>96.2</td></tr><tr><td colspan=\"3\">(a) Results on the Oto-Manguean family (10 languages)</td><td colspan=\"3\">(b) Results on the Sino-Tibetan family (1 language)</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF16": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Rank Acc</td></tr><tr><td>BASE: trm-single</td><td>1.0</td><td>91.8</td></tr><tr><td>BASE: trm-aug-single</td><td>1.0</td><td>91.8</td></tr><tr><td>uiuc-01-0</td><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF18": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF20": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Rank Acc</td></tr><tr><td>deepspin-02-1</td><td>3.4</td><td>87.1</td></tr><tr><td>deepspin-01-1</td><td>4.6</td><td>87.0</td></tr><tr><td>uiuc-01-0</td><td>3.5</td><td>87.4</td></tr><tr><td>BASE: trm-single</td><td>3.1</td><td>87.5</td></tr><tr><td>CULing-01-0</td><td>3.5</td><td>88.3</td></tr><tr><td>BASE: trm-aug-single</td><td>4.9</td><td>87.4</td></tr><tr><td>IMS-00-0</td><td colspan=\"2\">15.1 83.1</td></tr><tr><td>BASE: mono-single</td><td>5.3</td><td>86.3</td></tr><tr><td>BASE: mono-aug-single</td><td>6.8</td><td>86.3</td></tr><tr><td>NYU-CUBoulder-04-0</td><td colspan=\"2\">10.2 85.2</td></tr><tr><td>NYU-CUBoulder-02-0</td><td colspan=\"2\">13.1 83.3</td></tr><tr><td>NYU-CUBoulder-03-0</td><td colspan=\"2\">12.0 84.4</td></tr><tr><td>LTI-00-1</td><td colspan=\"2\">11.1 84.3</td></tr><tr><td>cmu_tartan_00-1</td><td>9.8</td><td>79.5</td></tr><tr><td>NYU-CUBoulder-01-0</td><td colspan=\"2\">14.5 83.0</td></tr><tr><td colspan=\"3\">BASE: mono-aug-shared 13.2 84.4</td></tr><tr><td>cmu_tartan_01-0</td><td colspan=\"2\">11.1 78.9</td></tr><tr><td>cmu_tartan_01-1</td><td colspan=\"2\">11.1 78.8</td></tr><tr><td>cmu_tartan_00-0</td><td colspan=\"2\">10.8 79.3</td></tr><tr><td>BASE: trm-shared</td><td colspan=\"2\">19.5 77.7</td></tr><tr><td>BASE: trm-aug-shared</td><td colspan=\"2\">19.5 79.1</td></tr><tr><td>BASE: mono-shared</td><td colspan=\"2\">11.7 83.7</td></tr><tr><td>cmu_tartan_02-1</td><td colspan=\"2\">13.2 78.5</td></tr><tr><td>CU7565-02-0</td><td colspan=\"2\">19.4 78.6</td></tr><tr><td>ETHZ-02-1</td><td colspan=\"2\">18.9 76.4</td></tr><tr><td>flexica-01-1</td><td colspan=\"2\">26.2 66.6</td></tr><tr><td>flexica-03-1</td><td colspan=\"2\">25.5 66.5</td></tr><tr><td>flexica-02-1</td><td colspan=\"2\">25.9 64.2</td></tr><tr><td>ETHZ-00-1</td><td colspan=\"2\">27.1 60.1</td></tr><tr><td>*CU7565-01-0</td><td>30.0</td><td>0.0</td></tr><tr><td>Oracle (Baselines)</td><td/><td>97.0</td></tr><tr><td>Oracle (Submissions)</td><td/><td>98.4</td></tr><tr><td>Oracle (All)</td><td/><td>98.9</td></tr><tr><td colspan=\"3\">(a) Results on the Germanic genus (13 languages)</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |