|
{ |
|
"paper_id": "U13-1003", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:10:17.275455Z" |
|
}, |
|
"title": "Classifying English Documents by National Dialect", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We investigate national dialect identification, the task of classifying English documents according to their country of origin. We use corpora of known national origin as a proxy for national dialect. In order to identify general (as opposed to corpus-specific) characteristics of national dialects of English, we make use of a variety of corpora of different sources, with inter-corpus variation in length, topic and register. The central intuition is that features that are predictive of national origin across different data sources are features that characterize a national dialect. We examine a number of classification approaches motivated by different areas of research, and evaluate the performance of each method across 3 national dialects: Australian, British, and Canadian English. Our results demonstrate that there are lexical and syntactic characteristics of each national dialect that are consistent across data sources.", |
|
"pdf_parse": { |
|
"paper_id": "U13-1003", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We investigate national dialect identification, the task of classifying English documents according to their country of origin. We use corpora of known national origin as a proxy for national dialect. In order to identify general (as opposed to corpus-specific) characteristics of national dialects of English, we make use of a variety of corpora of different sources, with inter-corpus variation in length, topic and register. The central intuition is that features that are predictive of national origin across different data sources are features that characterize a national dialect. We examine a number of classification approaches motivated by different areas of research, and evaluate the performance of each method across 3 national dialects: Australian, British, and Canadian English. Our results demonstrate that there are lexical and syntactic characteristics of each national dialect that are consistent across data sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The English language exhibits substantial variation in its usage throughout the world with regional differences being noted at the lexical and syntactic levels (e.g., Trudgill and Hannah, 2008) between varieties of English such as that used in Britain and the United States. Although there are many varieties of English throughout the world -including, for example, New Zealand English, African American Vernacular English, and Indian English -there are a smaller number of so-called standard Englishes. British English and American English (or North American English) are often taken to be the two main varieties of standard English (Trudgill and Hannah, 2008; Quirk, 1995) , with other varieties of standard English, such as Canadian English and Australian English, viewed as more-similar to one of these main varieties.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 193, |
|
"text": "Trudgill and Hannah, 2008)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 634, |
|
"end": 661, |
|
"text": "(Trudgill and Hannah, 2008;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 662, |
|
"end": 674, |
|
"text": "Quirk, 1995)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The theme of this work is national dialect identification, the classification of documents as one of a closed set of candidate standard Englishes (hereafter referred to as dialects), by exploiting lexical and syntactic variation between dialects. We make use of corpora of text of known national origin as a proxy for text of each dialect. Specifically, we consider Australian English, British English, and Canadian English, three so-called \"inner circle\" standard Englishes (Jenkins, 2009) . 1 This preliminary work aims to establish whether standard approaches to text classification are able to accurately predict the variety of standard English in which a document is written. The notion of standard English is differentiated from other factors such as style (e.g., formality) or topic Trudgill (1999) , which are expected confounding factors. A model of dialect classification built on a single text type (e.g., standard national corpora) may be classifying documents on the basis of nondialectal differences such as topic or genre. In order to control for the confounding factors, we utilize text from a variety of sources. By drawing training and test data from different sources, the successful transfer of models from one text source to another is evidence that the classifier is indeed capturing differences between different documents that are dialectal, rather than being due to any of the aforementioned confounding factors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 490, |
|
"text": "(Jenkins, 2009)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 494, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 805, |
|
"text": "Trudgill (1999)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of this paper are: (1) we introduce national dialect identification as a classification task, (2) we relate national dialect identification to existing research on text classification, (3) we assemble a dataset for national dialect identification using corpora from a variety of sources, (4) we empirically evaluate a number of text classification methods for national dialect identification, and (5) we find that we can train classifiers that are able to predict the national dialect of documents across data sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "National dialect identification is conceptually related to a range of established text classification tasks. In this section, we give some background on related areas, deferring the description of the specific methods we implement to Section 3.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Text categorization has been described as the intersection of machine learning and information retrieval (Sebastiani, 2005) , and is focused on tasks such as mapping newswire documents onto the topics they discuss (Debole and Sebastiani, 2005) . A large variety of methods have been examined in the literature, due to the large overlap with the machine learning community (Sebastiani, 2002) . One approach that has been shown to consistently perform well is the use of Support Vector Machines (SVM, Cortes and Vapnik, 1995) . Joachims (1998) argued for their use in text categorization, observing that SVMs were well suited due to their ability to handle high-dimensional input spaces with few irrelevant features. Furthermore, he observed that most text categorization problems are linearly separable, a view that has been validated in a variety of studies (e.g., Yang and Liu, 1999; Drucker et al., 1999) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 123, |
|
"text": "(Sebastiani, 2005)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 243, |
|
"text": "(Debole and Sebastiani, 2005)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 390, |
|
"text": "(Sebastiani, 2002)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 499, |
|
"end": 523, |
|
"text": "Cortes and Vapnik, 1995)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 541, |
|
"text": "Joachims (1998)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 865, |
|
"end": 884, |
|
"text": "Yang and Liu, 1999;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 885, |
|
"end": 906, |
|
"text": "Drucker et al., 1999)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Categorization", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Language identification is the task of classifying a document according to the natural language it is written in. Recent work has applied language identification techniques to the identification of Dutch dialects, with encouraging results (Trieschnigg et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 265, |
|
"text": "(Trieschnigg et al., 2012)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Identification", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Authorship profiling is an umbrella term for classification tasks that involve inferring some characteristic of a document's author, such as age, gender and native language (Estival et al., 2007) . Native language identification (NLI, Koppel et al., 2005 ) is a well established authorship profiling task. The aim of NLI is to classify a document with respect to an author's native language, where this is not the language that the document is written in. One approach to NLI is to capture grammatical errors made by authors, through the use of contrastive analysis (Wong and Dras, 2009) , parse structures (Wong and Dras, 2011) or adaptor grammars (Wong et al., 2012) . Brooke and Hirst (2012) test a broad array of approaches to NLI, and specifically highlight issues with in-domain evaluation thereof.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 195, |
|
"text": "(Estival et al., 2007)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 254, |
|
"text": "Koppel et al., 2005", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 587, |
|
"text": "(Wong and Dras, 2009)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 628, |
|
"text": "(Wong and Dras, 2011)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 649, |
|
"end": 668, |
|
"text": "(Wong et al., 2012)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 671, |
|
"end": 694, |
|
"text": "Brooke and Hirst (2012)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Native Language Identification (NLI)", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Authorship profiling focuses on identifying features which vary between groups of authors but are fairly consistent for a given group. In contrast, authorship attribution is the task of mapping a document onto a particular author from a set of candidate authors (Stamatatos, 2009) , and is sometimes incorrectly conflated with authorship profiling. Mosteller and Wallace (1964) used a set of function words to attribute papers of disputed authorship. Other stylometric features used to identify authors include average sentence and word length (Yule, 1939) . Modern features used for authorship attribution include distributions over function words (Zhao and Zobel, 2005) , as well as features derived from parsing and part-of-speech tagging (Hirst and Feiguina, 2007) . Author-aware topic models have also been proposed for authorship attribution (Seroussi et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 280, |
|
"text": "(Stamatatos, 2009)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 377, |
|
"text": "Mosteller and Wallace (1964)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 556, |
|
"text": "(Yule, 1939)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 649, |
|
"end": 671, |
|
"text": "(Zhao and Zobel, 2005)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 768, |
|
"text": "(Hirst and Feiguina, 2007)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 848, |
|
"end": 871, |
|
"text": "(Seroussi et al., 2012)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Authorship Attribution", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Social media has recently exploded in popularity, with Twitter reporting that roughly 500 million tweets are sent each day (Twitter, 2013) . There is a relationship between textual content and geolocation, with for example, texts containing words such as streetcar, Maple Leafs, and DVP likely being related to Toronto, Canada (Han et al., 2012) . Eisenstein et al. (2010) apply techniques from topic modeling to study variation in word usage on Twitter in the United States. Of particular relevance to our work, Wing and Baldridge (2011) and Roller et al. (2012) aggregate the tweets of users to predict their physical location in grid-based representations of the continental United States. These methods consider the KL-divergence between the distribution of words in a user's aggregated tweets and that of the tweets known to originate from each grid cell, with the most-similar cell being selected as the target user's most-likely location.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 138, |
|
"text": "(Twitter, 2013)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 345, |
|
"text": "(Han et al., 2012)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 372, |
|
"text": "Eisenstein et al. (2010)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 563, |
|
"text": "Roller et al. (2012)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text-based Geolocation", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "Although the specific issue of English national dialect classification has not been considered to date, a small number of computational studies have examined issues related to dialects. For example, Atwell et al. (2007) consider which variety of English, British or American, is most common on the Web. Peirsman et al. (2010) use techniques based on distributional similarity to identify lectal markers -words characteristic of one dialect versus another due to differences in sense or frequencyof dialects of Dutch. Zaidan and Callison-Burch (2012) studied dialect identification in Arabic dialects using automatic classifiers, and found that classifiers using dialectal data outperformed an informed baseline, achieving near-human classification accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 219, |
|
"text": "Atwell et al. (2007)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 325, |
|
"text": "Peirsman et al. (2010)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Dialectal Studies", |
|
"sec_num": "2.6" |
|
}, |
|
{ |
|
"text": "Of particular relevance to our work, Cook and Hirst (2012) consider whether Web corpora from top-level domains (specifically .ca and .uk, in their work) represent corresponding national dialects (Canadian English and British English, respectively). They find that the relative distribution of spelling variants (e.g., the frequency of color relative to that of colour) is quite consistent across corpora of known national dialect. Furthermore, they show that these distributions are similar for corpora of known national dialect and Web corpora from a corresponding top-level domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 58, |
|
"text": "Cook and Hirst (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Dialectal Studies", |
|
"sec_num": "2.6" |
|
}, |
|
{ |
|
"text": "National dialect identification is a classification task, where each document must be mapped onto a single national dialect from a closed set of candidate dialects. We evaluate each method by training a classifier on a set of training documents and applying it to an independent set of test documents. For each experiment, we compute per-class precision, recall and F-score, using their standard definitions. We focus our evaluation on F-score, macroaveraged over all the per-class values, in order to maintain balance across precision and recall and across individual classes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
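
{

"text": "As a concrete reference point, the following is a minimal Python sketch (not the authors' code) of the macro-averaged F-score: per-class precision, recall and F-score are computed from true positives, false positives and false negatives, then averaged with equal weight per class. The toy gold and predicted labels are for illustration only.\n\ndef macro_f_score(gold, predicted):\n    # Per-class precision, recall and F-score, averaged so that each\n    # dialect contributes equally regardless of its frequency.\n    f_scores = []\n    for c in set(gold) | set(predicted):\n        tp = sum(1 for g, p in zip(gold, predicted) if g == c and p == c)\n        fp = sum(1 for g, p in zip(gold, predicted) if g != c and p == c)\n        fn = sum(1 for g, p in zip(gold, predicted) if g == c and p != c)\n        prec = tp / (tp + fp) if tp + fp else 0.0\n        rec = tp / (tp + fn) if tp + fn else 0.0\n        f_scores.append(2 * prec * rec / (prec + rec) if prec + rec else 0.0)\n    return sum(f_scores) / len(f_scores)\n\nprint(macro_f_score(['AU', 'UK', 'CA', 'UK'], ['AU', 'CA', 'CA', 'UK']))  # approximately 0.778",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "3"

},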
|
{ |
|
"text": "A key challenge in evaluating national dialect identification as a text classification task is that documents in the training data may exhibit some non-dialectal variation that the classifiers may pick up on. For example, if British English were represented by a balanced corpus such as the British National Corpus (Burnard, 2000) , but a corpus of say, newspaper texts, were used for American English (e.g., The New York Times Annotated Corpus, Sandhaus, 2008 ) then a classifier trained to distinguish between documents of these two corpora may pick up on differences in genre and topic as opposed to national dialect. Even if more-comparable corpora than those just mentioned above were chosen, because a corpus is a sample, certain topics or words will tend to be over-or under-represented. Indeed Kilgarriff (2001) points out such issues in the context of keyword comparisons of comparable corpora of British and American English, and Brooke and Hirst (2012) specifically highlight the same issue in native language identification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 315, |
|
"end": 330, |
|
"text": "(Burnard, 2000)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 460, |
|
"text": "Sandhaus, 2008", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 802, |
|
"end": 819, |
|
"text": "Kilgarriff (2001)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 963, |
|
"text": "Brooke and Hirst (2012)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-domain classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In an effort to avoid this pitfall, we utilize text of known national origin from a variety of different sources. Specifically, we collect text representing each national dialect from up to 4 different sources (Section 4). In this paper, following the terminology of Pan and Yang 2010, we refer to each source as a domain, and acknowledge that this does not correspond to the topical sense of the term domain that is more common in NLP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-domain classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We cross-validate by holding out each source in turn, training a classifier on the union of the remaining sources and then applying it to the heldout source. By carrying out cross-domain classification, we mitigate the risk that confounding factors such as topic, genre or document length will misleadingly give high classification accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-domain classification", |
|
"sec_num": "3.1" |
|
}, |
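
{

"text": "The protocol admits a short sketch, assuming a hypothetical data dictionary mapping each source name to its list of (document, dialect) pairs, and hypothetical train() and evaluate() helpers supplied by the caller; this is an illustration, not the authors' code.\n\nSOURCES = ['NATIONAL', 'WEB', 'WEBGOV', 'TWITTER']\n\ndef cross_domain_eval(data, train, evaluate):\n    # train() and evaluate() are hypothetical stand-ins for any of the\n    # classification methods described in Section 3.2.\n    results = {}\n    for held_out in SOURCES:\n        # Train on the union of all other sources; test on the held-out one.\n        train_set = [pair for s in SOURCES if s != held_out for pair in data[s]]\n        model = train(train_set)\n        results[held_out] = evaluate(model, data[held_out])\n    return results",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cross-domain classification",

"sec_num": "3.1"

},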
|
{ |
|
"text": "We select methods from each field (Section 2) that are promising for national dialect identification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification Methods", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We use a random classifier as our baseline, eschewing majority-class as it is not applicable in the cross-source context we consider; one of the primary differences anticipated between sources is that the relative distribution of classes will vary. The random classifier maps each document onto a dialect from our dialect set independently. It represents a trivial baseline that we expect all other classifiers to exceed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BASELINE", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "We use the general text categorization approach proposed by Joachims (1998) , applying a linear SVM to a standard bag-of-words representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 75, |
|
"text": "Joachims (1998)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TEXTCATEGORIZATION", |
|
"sec_num": "3.2.2" |
|
}, |
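
{

"text": "For illustration, a bag-of-words linear SVM can be assembled in a few lines with scikit-learn; the paper does not name an implementation, so the library choice and the toy training data below are ours.\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\ndocs = ['the colour of the harbour', 'the color of the harbor']\nlabels = ['UK', 'CA']  # toy training data for illustration only\nclf = make_pipeline(CountVectorizer(), LinearSVC())  # bag-of-words + linear SVM\nclf.fit(docs, labels)\nprint(clf.predict(['a colourful harbour']))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "TEXTCATEGORIZATION",

"sec_num": "3.2.2"

},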
|
{ |
|
"text": "We use part-of-speech plus function word n-grams with a maximum entropy classifier (Wong and Dras, 2009) . Wong and Dras aim to exploit grammatical errors, as contrastive analysis suggests that difficulties in acquiring a new language are due to differences between the new language and the native language of the learner, implying that the types of errors made are characteristic of the native language of the author. In national dialect identification, we do not expect grammatical errors to be as salient, because English is a national language of each of the countries considered. Nevertheless, part-of-speech plus function word n-grams are of interest because they roughly capture syntaxwhich is known to vary amongst national dialects (Trudgill and Hannah, 2008) -and are independent of the specific lexicalization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 104, |
|
"text": "(Wong and Dras, 2009)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 768, |
|
"text": "(Trudgill and Hannah, 2008)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NATIVELID", |
|
"sec_num": "3.2.3" |
|
}, |
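
{

"text": "The representation can be illustrated with a self-contained sketch: function words are kept as-is, content words are replaced by a part-of-speech tag, and n-grams are read off the mixed sequence. The tiny tag dictionary and function word list below are ours, standing in for a real tagger and stopword list.\n\nFUNCTION_WORDS = {'the', 'of', 'at', 'while', 'whilst'}\nPOS_TAG = {'team': 'NN', 'are': 'VB', 'winning': 'VB'}  # toy tag dictionary\n\ndef pos_fw_bigrams(tokens):\n    # Keep function words; back off to a POS tag (or 'X' if untagged).\n    mixed = [t if t in FUNCTION_WORDS else POS_TAG.get(t, 'X') for t in tokens]\n    return list(zip(mixed, mixed[1:]))\n\nprint(pos_fw_bigrams('the team are winning'.split()))\n# [('the', 'NN'), ('NN', 'VB'), ('VB', 'VB')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "NATIVELID",

"sec_num": "3.2.3"

},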
|
{ |
|
"text": "Authorship attribution is about modeling the linguistic idiosyncrasies of a particular author, in terms of some markers of the individual's style. Although in national dialect identification we do not assume that each document has a single unique author, we do assume that documents from the same country share stylistic properties resulting from the national dialect. We hypothesize that this results in systematic differences in the choice of function words (Zhao and Zobel, 2005) . We capture this using a distribution over function words, which is a restricted bag-of-words model, where only words on an externally specified 'whitelist' are retained. We use the same stopword list as for native language identification as a proxy for function words. As per Zhao and Zobel (2005) , we apply a naive Bayes classifier.", |
|
"cite_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 482, |
|
"text": "(Zhao and Zobel, 2005)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 761, |
|
"end": 782, |
|
"text": "Zhao and Zobel (2005)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AUTHORSHIPATTRIB", |
|
"sec_num": "3.2.4" |
|
}, |
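
{

"text": "A minimal sketch of the restricted bag-of-words model: only words on the whitelist are counted, and a naive Bayes classifier is applied to the resulting vectors. The six-word whitelist and two-document training set stand in for the full stopword list and real data.\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\nwhitelist = ['the', 'of', 'shall', 'will', 'amongst', 'among']  # toy function word list\nvec = CountVectorizer(vocabulary=whitelist)  # fixed vocabulary, so no fitting needed\nX = vec.transform(['amongst the best of the lot', 'among the best of the lot'])\nclf = MultinomialNB().fit(X, ['UK', 'CA'])\nprint(clf.predict(vec.transform(['amongst the rest'])))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "AUTHORSHIPATTRIB",

"sec_num": "3.2.4"

},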
|
{ |
|
"text": "We treat each dialect as a distinct language, and apply the language identification method of Lui and Baldwin (2011) in which documents are represented using a mixture of specially-selected byte sequences. The method specifically exploits differences in data sources to learn a set of byte sequences that is representative of languages (or in our case, dialects) across all the data sources. This feature selection is done by scoring each sequence using information gain (IG, Quinlan, 1993) , with respect to each dialect as well as with each data source. This representation is then combined with a multinomial naive Bayes classifier.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 116, |
|
"text": "Lui and Baldwin (2011)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 490, |
|
"text": "Quinlan, 1993)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LANGID", |
|
"sec_num": "3.2.5" |
|
}, |
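
{

"text": "The scoring step can be sketched compactly: the information gain of a candidate byte sequence is the entropy of the dialect labels minus their conditional entropy given the presence or absence of the sequence. The full method also scores sequences against the data sources; the three toy documents here are ours.\n\nimport math\nfrom collections import Counter\n\ndef entropy(labels):\n    n = len(labels)\n    return -sum(c / n * math.log2(c / n) for c in Counter(labels).values())\n\ndef info_gain(docs, labels, seq):\n    # docs: byte strings; seq: a candidate byte sequence.\n    present = [l for d, l in zip(docs, labels) if seq in d]\n    absent = [l for d, l in zip(docs, labels) if seq not in d]\n    h_cond = sum(len(part) / len(labels) * entropy(part)\n                 for part in (present, absent) if part)\n    return entropy(labels) - h_cond\n\nprint(info_gain([b'colour', b'color', b'colourful'], ['UK', 'CA', 'UK'], b'our'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "LANGID",

"sec_num": "3.2.5"

},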
|
{ |
|
"text": "Our geolocation classifier is a nearest-prototype classifier using K-L divergence as the distance metric on a standard bag-of-words (Wing and Baldridge, 2011). The class prototypes are calculated from the concatenation of all members of the class. For both documents and classes, probability mass is assigned to unseen terms using a pseudo-Good-Turing smoothing, the parameters of which we estimate from the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEOLOCATION", |
|
"sec_num": "3.2.6" |
|
}, |
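
{

"text": "A sketch of the decision rule follows, with simple add-one smoothing standing in for the pseudo-Good-Turing scheme described above: a document is assigned to the class whose prototype distribution minimizes the KL divergence from the document's distribution.\n\nimport math\nfrom collections import Counter\n\ndef distribution(tokens, vocab):\n    counts = Counter(tokens)\n    total = len(tokens) + len(vocab)  # add-one smoothing, our simplification\n    return {w: (counts[w] + 1) / total for w in vocab}\n\ndef kl(p, q, vocab):\n    return sum(p[w] * math.log(p[w] / q[w]) for w in vocab)\n\ndef classify(doc_tokens, prototypes, vocab):\n    p = distribution(doc_tokens, vocab)\n    return min(prototypes, key=lambda c: kl(p, prototypes[c], vocab))\n\nvocab = {'colour', 'color', 'centre'}  # toy vocabulary\nprotos = {'UK': distribution(['colour', 'centre'], vocab),\n          'CA': distribution(['color', 'colour'], vocab)}\nprint(classify(['colour', 'centre'], protos, vocab))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GEOLOCATION",

"sec_num": "3.2.6"

},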
|
{ |
|
"text": "Motivated by Cook and Hirst's (2012) work on comparing dialects, our variant pair classifier uses the relative frequencies of spelling variants (e.g., color/colour, yoghurt/yogurt) to distinguish between dialects. For each of a set of \u223c1.8k spelling variant pairs from VarCon, 2 we calculate the frequency difference in a document between the first and second variant (e.g., freq(color) \u2212 freq(colour)). A standard vector-space model of similarity is used: each dialect is modeled as the sum of the vectors of all documents for that dialect; Cosine is used to map a given document to the most similar dialect.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 36, |
|
"text": "Cook and Hirst's (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VARIANTPAIR", |
|
"sec_num": "3.2.7" |
|
}, |
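
{

"text": "The representation and decision rule admit a compact sketch: one dimension per spelling pair, valued freq(first) minus freq(second), compared to per-dialect prototype vectors by cosine similarity. The two pairs and toy prototype vectors below stand in for the roughly 1.8k VarCon pairs and the real per-dialect sums.\n\nimport math\nfrom collections import Counter\n\nPAIRS = [('color', 'colour'), ('yogurt', 'yoghurt')]  # stand-ins for VarCon pairs\n\ndef variant_vector(tokens):\n    c = Counter(tokens)\n    return [c[a] - c[b] for a, b in PAIRS]\n\ndef cosine(u, v):\n    dot = sum(x * y for x, y in zip(u, v))\n    norm = math.sqrt(sum(x * x for x in u)) * math.sqrt(sum(y * y for y in v))\n    return dot / norm if norm else 0.0\n\ndialect_vecs = {'UK': [-3.0, -2.0], 'CA': [2.0, 1.0]}  # toy prototype sums\ndoc = variant_vector('the colour of yoghurt'.split())\nprint(max(dialect_vecs, key=lambda d: cosine(doc, dialect_vecs[d])))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "VARIANTPAIR",

"sec_num": "3.2.7"

},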
|
{ |
|
"text": "Large corpora are available for British and Canadian English. The written portion of the British National Corpus (BNC, Burnard, 2000) consists of roughly 87 million words of a variety of genres and topics from British authors from the late twentieth century. The Strathy Corpus 3 consists of roughly 40 million words of a variety of text types by Canadian authors from a similar time period. We use these two corpora in this study.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 133, |
|
"text": "Burnard, 2000)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NATIONAL", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Appropriate resources are not available for American or Australian English. The Corpus of Contemporary American English (COCA, Davies, 2009) currently consists of over 450 million words of American English, but can only be accessed through a web interface; the full text form is unavailable. The American National Corpus (ANC, Ide, 2009) is much smaller than the BNC and Strathy Corpus at approximately only 11 million words. 4 In the case of Australian English, the Aus- Table 1 : Characteristics of the ENDIALECT dataset. # is the document count, \u00b5 and \u03c3 are the mean and standard deviation of document length (in words).", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 140, |
|
"text": "Davies, 2009)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 337, |
|
"text": "Ide, 2009)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 427, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 479, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "NATIONAL", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "tralian Corpus of English (Green and Peters, 1991) consists of just 1 million words. 5", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 50, |
|
"text": "(Green and Peters, 1991)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NATIONAL", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The Web has been widely used for building corpora (e.g., Baroni et al., 2009; Kilgarriff et al., 2010) with Cook and Hirst (2012) presenting preliminary results suggesting that English corpora from top-level domains might represent corresponding national dialects of English. Australia, Canada, and the United Kingdom all have corresponding top-level domains that contain a wide variety of text types -namely .au, .ca, and .uk, respectively -from which we can build corpora. However, the top-level domain for the United States, .us, is primarily used for morespecialized purposes, such as government, and so a similar Web corpus cannot easily be built for American English. Here we build English Web corpora from .au, .ca, and .uk which -based on the findings of Cook and Hirst (2012) -we assume to represent Australian, Canadian, and British English, respectively. One common method for corpus construction is to issue a large number of queries to a search engine, download the resulting URLs, and postprocess the documents to produce a corpus (e.g., Baroni and Bernardini, 2004; Sharoff, 2006; Kilgarriff et al., 2010) . Cook and Hirst (2012) use such a method to build corpora from the .ca and .uk domains; we follow their approach here. Specifically, we select alphabetic types in the BNC with character length greater than 2 and frequency rank 1001-5000 in the BNC as seed words. We then use Baroni and Bernardini's (2004) Boot-CaT tools to form 18k random 3-tuples from these seeds. We use the BootCaT tools to issue search engine queries for these tuples in the .au, .ca, and .uk domains. Using the BootCaT tools we then download the resulting URLs, and eliminate duplicates. We further eliminate non-English documents using langid.py (Lui and Baldwin, 2012) . Following Cook and Hirst we only retain up to three randomly-selected documents per domain (e.g., www.cbc.ca). The final corpora consist of roughly 77, 96, and 115 million tokens for the .au, .ca, and .uk domains, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 77, |
|
"text": "Baroni et al., 2009;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 102, |
|
"text": "Kilgarriff et al., 2010)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 129, |
|
"text": "Cook and Hirst (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1052, |
|
"end": 1080, |
|
"text": "Baroni and Bernardini, 2004;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1081, |
|
"end": 1095, |
|
"text": "Sharoff, 2006;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1096, |
|
"end": 1120, |
|
"text": "Kilgarriff et al., 2010)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1123, |
|
"end": 1144, |
|
"text": "Cook and Hirst (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1397, |
|
"end": 1427, |
|
"text": "Baroni and Bernardini's (2004)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1742, |
|
"end": 1765, |
|
"text": "(Lui and Baldwin, 2012)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WEB", |
|
"sec_num": "4.2" |
|
}, |
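
{

"text": "Two steps of this pipeline can be sketched briefly: forming random 3-tuples of seed words for search engine queries, and filtering non-English documents with langid.py, whose classify() function returns a (language, confidence) pair. The seed list and page texts below are toy stand-ins.\n\nimport random\n\nimport langid  # the langid.py package\n\nseeds = ['harbour', 'theatre', 'programme', 'council', 'railway', 'labour']\ntuples = [tuple(random.sample(seeds, 3)) for _ in range(5)]\nqueries = [' '.join(t) for t in tuples]  # each issued against one top-level domain\n\npages = ['Welcome to the harbour city council site', 'Bonjour et bienvenue']\nenglish = [p for p in pages if langid.classify(p)[0] == 'en']\nprint(queries, english)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "WEB",

"sec_num": "4.2"

},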
|
{ |
|
"text": "The government of each of the countries considered in this study produces an enormous number of documents which can be used to build corpora. Furthermore, because many government websites are in particular second-level domains (e.g., .gov.uk) it is possible to easily construct a Web corpus consisting of such documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WEBGOV (Government)", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To build governmental Web corpora we follow a very similar process to that in the previous subsection, this time issuing queries for each of .gov.au, .gc.ca, and .gov.uk. 6 The resulting Australian, British, and Canadian government corpora contain roughly 199, 161, and 148 million words, respectively. 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WEBGOV (Government)", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Twitter 8 is an enormously popular micro-blogging service which has previously been used in studies of regional linguistic variation (e.g., Eisenstein et al., 2010) . Twitter allows users to post short (up to 140 characters) messages known as tweets, and a recent report from Twitter indicates that roughly 500 million tweets are sent each day (Twitter, 2013) . Crucially for this project, roughly 1% of tweets include geolocation metadata and 6 In this case there is an obvious domain to use to build an American government corpus, i.e., .gov. However, because we did not have a general Web corpus, or an appropriate national corpus, for American English, we did not build a government corpus for this dialect.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 164, |
|
"text": "Eisenstein et al., 2010)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 359, |
|
"text": "(Twitter, 2013)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 445, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TWITTER", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "7 There is a small amount of overlap between WEB and WEBGOV, with 3.7% of the WEB documents coming from governmental second-level domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TWITTER", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "8 http://twitter.com/ can be used to build corpora known to correspond to a particular geographical region. Using the Twitter API we collected a sample of tweets from October 2011 -January 2012 with geotags indicating that they were sent from Australia, Canada, or the United Kingdom. 9 We then filtered this collection to include only English tweets (again using langid.py). The resulting collection includes roughly 140k, 240k, and 1.4M tweets from Australia, Canada, and the United Kingdom, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 286, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TWITTER", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The ENDIALECT dataset (Table 1) , consists of 109502 documents in 3 English dialects (Australian, British, and Canadian) across 4 text sources (NATIONAL, WEB, WEBGOV and TWIT-TER, described in Section 4). We conducted a pilot study, and found that across all the methods we test, the in-domain classification accuracy did not vary significantly beyond 5000 documents per dialect. Thus, for NATIONAL, WEB and WEBGOV, we retained 10000 documents per dialect. For WEB and WEBGOV, we randomly sampled 10000 documents (without replacement) from each dialect. For NATIONAL, the documents are substantially longer, and furthermore, documents from the (Canadian) Strathy Corpus are on average twice as long as those from the (British) BNC. In order to extract documents of comparable length to the WEB and WEBGOV, we divided each document in NATIONAL into equal-sized fragments (10 fragments per document for the BNC and 20 per document for the Strathy Corpus). We then sampled 10000 fragments from each, yielding pseudodocuments of comparable length to documents from WEB and WEBGOV.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 31, |
|
"text": "(Table 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The ENDIALECT dataset", |
|
"sec_num": "5" |
|
}, |
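
{

"text": "The fragmenting step admits a one-function sketch: a document is split into n equal-sized pieces by token count (any remainder is dropped, a simplification of ours), yielding pseudo-documents of comparable length across sources.\n\ndef fragments(tokens, n):\n    size = len(tokens) // n  # tokens beyond n * size are dropped\n    return [tokens[i * size:(i + 1) * size] for i in range(n)]\n\ndoc = ['w%d' % i for i in range(40000)]  # stand-in for a long corpus document\nprint(len(fragments(doc, 10)[0]), len(fragments(doc, 20)[0]))  # 4000 2000",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The ENDIALECT dataset",

"sec_num": "5"

},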
|
{ |
|
"text": "Constructing documents from the Twitter data is more difficult because individual messages are very short; preliminary experiments indicated that trying to infer dialect from a single message is nearly impossible. For Twitter, we therefore concatenate all documents from a given user to form a single pseudo-document per user. The Twitter crawl available to us had insufficient data to extract 10000 users per country, so we opted to retain all the users that had 15 or more messages in our data, giving us a total number of user pseudo-documents comparable to the number of documents for our other data sources (albeit with a skew between dialects that is not present for the other text sources).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The ENDIALECT dataset", |
|
"sec_num": "5" |
|
}, |
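
{

"text": "The per-user aggregation can be sketched as follows: tweets are grouped by user and concatenated into one pseudo-document, and only users with 15 or more messages are kept; this is an illustration, not the authors' code.\n\nfrom collections import defaultdict\n\ndef user_pseudo_docs(tweets, min_tweets=15):\n    # tweets: iterable of (user_id, text) pairs\n    by_user = defaultdict(list)\n    for user, text in tweets:\n        by_user[user].append(text)\n    return {u: ' '.join(ts)\n            for u, ts in by_user.items() if len(ts) >= min_tweets}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The ENDIALECT dataset",

"sec_num": "5"

},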
|
{ |
|
"text": "The first set of experiments we perform is in a leave-one-out cross-domain learning setting over our 4 text sources (referred to interchangeably as \"domains\") and 7 classification methods. We train one classifier for each pair of classification method and target domain, for a total of 28 classifiers. The training data used for each classifier is leave-oneout over the set of domains. For example, for any given classification method, the classifier applied to WEB is trained on the union of data from NA-TIONAL, WEBGOV, and TWITTER. Table 2 summarizes the macroaveraged F-score for each classifier in the cross-domain classification setting. We find that overall, the best methods for national dialect identification are TEXTCATE-GORIZATION and NATIVELID. We also find that F-score varies greatly between target domains; in general, F-score is highest for NATIONAL, and lowest for TWITTER.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 535, |
|
"end": 542, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this work, we primarily focus on crossdomain national dialect identification, for reasons discussed in Section 3.1. However, most of the methods we consider were not developed for cross-domain application, and thus in-domain results provide an interesting point of comparison. Hence, we present results from in-domain 10-fold cross-validation in Table 3 for comparison with the cross-domain outcome.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 356, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our in-domain results are consistent with our cross-domain findings, in that methods that perform better in-domain tend to also perform better cross-domain, and target domains that are \"easier\" in-domain also tend to be \"easier\" cross-domain, \"easier\" meaning that all methods tend to attain better results. For most methods, the in-domain performance is better than the cross-domain performance, which is not surprising given that it is likely that there are particular terms that are predictive of a dialect in-domain that may not generalize across domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Overall, the results on in-domain and crossdomain classification suggest that TEXTCATEGO-RIZATION is consistently the best among the methods compared across multiple domains, and that some domains are inherently easier for national dialect identification than others. To better understand the difference between domains, we conducted a further experiment, where we trained a classifier using each method on data from only one of our domains. We then applied this classifier to every other domain. We conducted this experiment for the two best-performing methods in the cross-domain setting: TEXTCATEGORIZA-TION and NATIVELID. The results of this experiment are summarized in Table 4 . The performance of classifiers trained on all non-test domains is generally better than that of classifiers trained on a single domain. The only exception to this is with classifiers trained on WEB applied to WEBGOV, which could be due to the noted overlap between these domains. However, this relationship is not symmetrical: classifiers trained only on WEBGOV do not perform better on WEB than classifiers trained on WEBGOV +NATIONAL +TWITTER.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 675, |
|
"end": 682, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The high performance of TEXTCATEGORIZA-TION provides strong evidence of the viability of the cross-domain approach to identifying national dialect. This can be partly attributed to the much larger feature set of this method -to which no feature selection is applied -as compared to the other methods. The total vocabulary across all the datasets amounts to over 3 million unique terms. From this, the SVM algorithm was able to learn parameter weights that were applicable across domains -this can be seen from how the crossdomain text categorization results (Table 2) comfortably exceed the baseline in all domains.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 558, |
|
"end": 567, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "AUTHORSHIPATTRIB uses a set of \u223c 400 function words, in contrast to the \u223c 3 million terms in the text categorization approach. The AUTHOR-SHIPATTRIB results are very close to the baseline in the cross-domain setting, suggesting that stylistic variation as captured by these features is not characteristic of English dialects. F-scores for NATIVELID comfortably exceed the baseline, which suggests that English dialects have systematic differences at the syntactic level. The results are inferior to TEXTCATEGORIZA-TION, indicating that there are specific words that are predictive of national dialect across domains. This suggests there are systematic differences in the topics of discussion between documents of different origin, likely due to the discussion of specific locations. For example, analysis of our results indicates that (unsurprisingly) the term Canada is strongly associated with documents of Canadian origin. (Table 3) are replicated in italics for comparison.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 926, |
|
"end": 935, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "may be due to the small feature set. Lui and Baldwin (2011) select the top 400 features per language over 97 languages, so their feature set consists of 7480 features. We only consider 3 dialects, with a corresponding feature set of 1058 features. Though our features are clearly informative for the task (LANGID results comfortably exceed the baseline), there may be useful information that is lost when a document is mapped into this reduced feature space. LANGID performs exceptionally poorly when applied to TWITTER in a cross-domain setting, because the classifier predicts a minority class 'Australian' for almost all documents. This is likely due to the lack of national corpus training data for 'Australian', as Table 4 suggests that national corpus data are an especially poor proxy for Twitter (a result consistent with the findings of Baldwin et al. (2013) ). The poor performance of the GEOLOCATION is perhaps more surprising, as like TEXTCATEGO-RIZATION this approach makes use of the full bagof-words feature set. However, in the geolocation task of Wing and Baldridge (2011), the class space is much larger, and furthermore it is structured; classes correspond to regions of the Earth's surface, and the distance of the predicted region to the goldstandard region is taken into account in evaluation. The national dialect identification task is much more coarse-grained, potentially making it a poor match for geolocation methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 846, |
|
"end": 867, |
|
"text": "Baldwin et al. (2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 720, |
|
"end": 727, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The relatively poor performance of LANGID", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "VARIANTPAIR performs poorly throughout, with results below the random baseline in the cross-domain setting. The key difference between our national dialect identification task and the work of Cook and Hirst (2012) is that they classify entire corpora, whereas we classify individual documents. Documents are much shorter than corpora, and contain less spelling variation because they typically have a single author who is unlikely to choose different spellings of a given word.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 213, |
|
"text": "Cook and Hirst (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The relatively poor performance of LANGID", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our cross-domain classification results strongly suggest that there are characteristics of each national dialect that are consistent across multiple domains. These characteristics go beyond simple topical differences, as representations such as function word distributions, and part-of-speech plus function word bigrams, omit topical information from consideration. Even without topical information, a classifier trained using techniques from native language identification is able to comfortably surpass a random baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "In future work, we intend to analyze the features weighted highly by our classifiers to potentially identify previously-undocumented differences between national dialects. Additionally, work on dialect identification might benefit methods for language identification. Prager (1999) finds that modeling Norwegian dialects separately improves language identification performance. In future work, we will examine if similarly modeling English dialects improves language identification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 281, |
|
"text": "Prager (1999)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We don't consider American English because of a rather surprising lack of available resources for this national dialect, discussed in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://wordlist.sourceforge.net 3 http://www.queensu.ca/strathy/ 4 This figure refers specifically to the written portion of the Open ANC, the freely-available version of this corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Australian National Corpus (http://www. ausnc.org.au/) is much larger, but consists of relatively little written material from the same time period as our other corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Although an abundance of geolocated tweets are available for the United States, since we do not have corpora from the other sources for this national dialect we do not consider it here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Which English dominates the World Wide Web, British or American?", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Atwell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junaid", |
|
"middle": [], |
|
"last": "Arshad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chien-Ming", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lan", |
|
"middle": [], |
|
"last": "Nim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josiah", |
|
"middle": [], |
|
"last": "Noushin Rezapour Asheghi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Washtell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Corpus Linguistics Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Atwell, Junaid Arshad, Chien-Ming Lai, Lan Nim, Noushin Rezapour Asheghi, Josiah Wang, and Justin Washtell. 2007. Which English dom- inates the World Wide Web, British or Ameri- can? In Proceedings of the Corpus Linguistics Conference (CL 2007). Birmingham, UK.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "How noisy social media text, how diffrnt social media sources?", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mackinlay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Sixth International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "356--364", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Baldwin, Paul Cook, Marco Lui, An- drew MacKinlay, and Li Wang. 2013. How noisy social media text, how diffrnt social me- dia sources? In Proceedings of the Sixth In- ternational Joint Conference on Natural Lan- guage Processing, pages 356-364. Asian Feder- ation of Natural Language Processing, Nagoya, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Boot-CaT: Bootstrapping corpora and terms from the Web", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [], |
|
"last": "Bernardini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Fourth International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni and Silvia Bernardini. 2004. Boot- CaT: Bootstrapping corpora and terms from the Web. In Proceedings of the Fourth Interna- tional Conference on Language Resources and Evaluation (LREC 2004).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The WaCky Wide Web: A collection of very large linguistically processed Web-crawled corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [], |
|
"last": "Bernardini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriano", |
|
"middle": [], |
|
"last": "Ferraresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eros", |
|
"middle": [], |
|
"last": "Zanchetta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Language Resources and Evaluation", |
|
"volume": "43", |
|
"issue": "3", |
|
"pages": "209--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni, Silvia Bernardini, Adriano Fer- raresi, and Eros Zanchetta. 2009. The WaCky Wide Web: A collection of very large linguis- tically processed Web-crawled corpora. Lan- guage Resources and Evaluation, 43(3):209- 226.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Robust, lexicalized native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Brooke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The COLING 2012 Organizing Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "391--408", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Brooke and Graeme Hirst. 2012. Ro- bust, lexicalized native language identification. In Proceedings of COLING 2012, pages 391- 408. The COLING 2012 Organizing Commit- tee, Mumbai, India.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The British National Corpus Users Reference Guide", |
|
"authors": [ |
|
{ |
|
"first": "Lou", |
|
"middle": [], |
|
"last": "Burnard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lou Burnard. 2000. The British National Cor- pus Users Reference Guide. Oxford University Computing Services.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Do Web corpora from top-level domains represent national varieties of English?", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 11th International Conference on Textual Data Statistical Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "281--293", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Cook and Graeme Hirst. 2012. Do Web cor- pora from top-level domains represent national varieties of English? In Proceedings of the 11th International Conference on Textual Data Statistical Analysis, pages 281-293. Li\u00e8ge, Bel- gium.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Support-vector networks", |
|
"authors": [ |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Machine Learning", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corinna Cortes and Vladimir Vapnik. 1995. Support-vector networks. Machine Learning, 20:273-297.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "): Design, architecture, and linguistic insights", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Davies", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "The 385+ million word Corpus of Contemporary American English", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "159--190", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Davies. 2009. The 385+ million word Cor- pus of Contemporary American English (1990- 2008+): Design, architecture, and linguistic in- sights. International Journal of Corpus Linguis- tics, 14(2):159-190.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "An analysis of the relative hardness of reuters-21578 subsets", |
|
"authors": [ |
|
{ |
|
"first": "Franca", |
|
"middle": [], |
|
"last": "Debole", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Journal of the American Society for Information Science and Technology", |
|
"volume": "56", |
|
"issue": "6", |
|
"pages": "584--596", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franca Debole and Fabrizio Sebastiani. 2005. An analysis of the relative hardness of reuters- 21578 subsets. Journal of the American So- ciety for Information Science and Technology, 56(6):584-596.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Support vector machines for spam categorization", |
|
"authors": [ |
|
{ |
|
"first": "Harris", |
|
"middle": [], |
|
"last": "Drucker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "IEEE Transactions on Neural Networks", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "1048--1054", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harris Drucker, Vladimir Vapnik, and Dongui Wu. 1999. Support vector machines for spam cate- gorization. IEEE Transactions on Neural Net- works, 10:1048-1054.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A latent variable model for geographic lexical variation", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "O'Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1277--1287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Eisenstein, Brendan O'Connor, Noah A. Smith, and Eric P. Xing. 2010. A latent variable model for geographic lexical variation. In Em- pirical Methods in Natural Language Process- ing, pages 1277-1287. Cambridge, MA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Author profiling for english emails", |
|
"authors": [ |
|
{ |
|
"first": "Dominique", |
|
"middle": [], |
|
"last": "Estival", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanja", |
|
"middle": [], |
|
"last": "Gaustad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proccedings of the 10th Conference for the Pacific Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "263--272", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dominique Estival, Tanja Gaustad, and Ben Hutchinson. 2007. Author profiling for en- glish emails. In Proccedings of the 10th Con- ference for the Pacific Association for Com- putational Linguistics, pages 263-272. Mel- bourne,Australia.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "The Australian corpus project and Australian English", |
|
"authors": [ |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pam", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "International Computer Archive of Modern English", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "37--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elizabeth Green and Pam Peters. 1991. The Aus- tralian corpus project and Australian English. International Computer Archive of Modern En- glish, 15:37-53.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Geolocation prediction in social media data by finding location indicative words", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The COLING 2012 Organizing Committee, Mumbai", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1045--1062", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Han, Paul Cook, and Timothy Baldwin. 2012. Geolocation prediction in social media data by finding location indicative words. In Proceed- ings of COLING 2012, pages 1045-1062. The COLING 2012 Organizing Committee, Mum- bai, India.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Bigrams of syntactic labels for authorship discrimination of short texts", |
|
"authors": [ |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Feiguina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Literary and Linguistic Computing", |
|
"volume": "22", |
|
"issue": "4", |
|
"pages": "405--417", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Graeme Hirst and Olga Feiguina. 2007. Bigrams of syntactic labels for authorship discrimination of short texts. Literary and Linguistic Comput- ing, 22(4):405-417.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The American National Corpus: Then, now, and tomorrow", |
|
"authors": [ |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Ide", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Selected Proceedings of the 2008 HC-SNet Workshop on Designing an Australian National Corpus", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "108--113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nancy Ide. 2009. The American National Corpus: Then, now, and tomorrow. In Michael Haugh, editor, Selected Proceedings of the 2008 HC- SNet Workshop on Designing an Australian Na- tional Corpus, pages 108-113. Cascadilla Pro- ceedings Project, Sommerville, MA.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "World Englishes: A resource book for students", |
|
"authors": [ |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Jenkins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Routledge", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jennifer Jenkins. 2009. World Englishes: A re- source book for students. Routledge, London, second edition.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Text categorization with support vector machines: learning with many relevant features", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 10th European Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "137--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 1998. Text categorization with support vector machines: learning with many relevant features. In Proceedings of the 10th Eu- ropean Conference on Machine Learning, pages 137-142. Chemnitz, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Comparing corpora", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kilgarriff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "International Journal of Corpus Linguistics", |
|
"volume": "6", |
|
"issue": "1", |
|
"pages": "97--133", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Kilgarriff. 2001. Comparing corpora. International Journal of Corpus Linguistics, 6(1):97-133.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A corpus factory for many languages", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kilgarriff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Pomik\u00e1lek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pvs", |
|
"middle": [], |
|
"last": "Avinesh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC 2010)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "904--910", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Kilgarriff, Siva Reddy, Jan Pomik\u00e1lek, and Avinesh PVS. 2010. A corpus factory for many languages. In Proceedings of the Seventh con- ference on International Language Resources and Evaluation (LREC 2010), pages 904-910. Valletta, Malta.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Automatically determining an anonymous authors native language", |
|
"authors": [ |
|
{ |
|
"first": "Moshe", |
|
"middle": [], |
|
"last": "Koppel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Schler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kfir", |
|
"middle": [], |
|
"last": "Zigdon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Intelligence and Security Informatics", |
|
"volume": "3495", |
|
"issue": "", |
|
"pages": "209--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moshe Koppel, Jonathan Schler, and Kfir Zigdon. 2005. Automatically determining an anony- mous authors native language. Intelligence and Security Informatics, 3495:209-217.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Crossdomain feature selection for language identification", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th International Joint Conference on Natural Language Processing (IJCNLP 2011)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "553--561", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Lui and Timothy Baldwin. 2011. Cross- domain feature selection for language identifi- cation. In Proceedings of the 5th International Joint Conference on Natural Language Pro- cessing (IJCNLP 2011), pages 553-561. Chi- ang Mai, Thailand.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "langid.py: An off-the-shelf language identification tool", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL 2012) Demo Session", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Lui and Timothy Baldwin. 2012. langid.py: An off-the-shelf language identification tool. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL 2012) Demo Session, pages 25-30. Jeju, Republic of Korea.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Inference and disputed authorship: The Federalist Papers", |
|
"authors": [ |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Mosteller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1964, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frederick Mosteller and David L. Wallace. 1964. Inference and disputed authorship: The Feder- alist Papers. Addison-Wesley, Reading,USA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A survey on transfer learning. Knowledge and Data Engineering", |
|
"authors": [ |
|
{ |
|
"first": "Sinno Jialin", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "IEEE Transactions on", |
|
"volume": "22", |
|
"issue": "10", |
|
"pages": "1345--1359", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sinno Jialin Pan and Qiang Yang. 2010. A survey on transfer learning. Knowledge and Data En- gineering, IEEE Transactions on, 22(10):1345- 1359.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "The automatic identification of lexical variation between language varieties", |
|
"authors": [ |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Peirsman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Geeraerts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Speelman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Natural Language Engineering", |
|
"volume": "16", |
|
"issue": "4", |
|
"pages": "469--491", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yves Peirsman, Dirk Geeraerts, and Dirk Speel- man. 2010. The automatic identification of lex- ical variation between language varieties. Nat- ural Language Engineering, 16(4):469-491.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Linguini: language identification for multilingual documents", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Prager", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings the 32nd Annual Hawaii International Conference on Systems Sciences (HICSS-32)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John M. Prager. 1999. Linguini: language iden- tification for multilingual documents. In Pro- ceedings the 32nd Annual Hawaii International Conference on Systems Sciences (HICSS-32).", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "C4.5: Programs for Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Ross" |
|
], |
|
"last": "Quinlan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Ross Quinlan. 1993. C4.5: Programs for Machine Learning. Morgan Kaufmann, San Mateo, USA.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Grammatical and lexical variance in English", |
|
"authors": [ |
|
{ |
|
"first": "Randolph", |
|
"middle": [], |
|
"last": "Quirk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Randolph Quirk. 1995. Grammatical and lexical variance in English. Longman, London.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Supervised text-based geolocation using language models on an adaptive grid", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Speriosu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarat", |
|
"middle": [], |
|
"last": "Rallapalli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Wing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1500--1510", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Roller, Michael Speriosu, Sarat Ralla- palli, Benjamin Wing, and Jason Baldridge. 2012. Supervised text-based geolocation us- ing language models on an adaptive grid. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Pro- cessing and Computational Natural Language Learning, pages 1500-1510. Jeju Island, Korea.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The New York Times Annotated Corpus. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Sandhaus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evan Sandhaus. 2008. The New York Times An- notated Corpus. Linguistic Data Consortium, Philadelphia, PA.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Machine learning in automated text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM computing surveys (CSUR)", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabrizio Sebastiani. 2002. Machine learning in automated text categorization. ACM computing surveys (CSUR), 34(1):1-47.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "TEMIS Text Mining Solutions S.A", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--129", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabrizio Sebastiani. 2005. Text categorization, pages 109-129. TEMIS Text Mining Solutions S.A., Italy.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Authorship attribution with authoraware topic models", |
|
"authors": [ |
|
{ |
|
"first": "Yanir", |
|
"middle": [], |
|
"last": "Seroussi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "Bohnert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingrid", |
|
"middle": [], |
|
"last": "Zukerman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "264--269", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanir Seroussi, Fabian Bohnert, and Ingrid Zuker- man. 2012. Authorship attribution with author- aware topic models. In Proceedings of the 50th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 264-269. Association for Computational Linguistics, Jeju Island, Korea.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Creating general-purpose corpora using automated search engine queries", |
|
"authors": [ |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Sharoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Wacky! Working papers on the Web as Corpus", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Serge Sharoff. 2006. Creating general-purpose corpora using automated search engine queries. In Marco Baroni and Silvia Bernardini, editors, Wacky! Working papers on the Web as Corpus, pages 63-98. GEDIT, Bologna, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "A survey of modern authorship attribution methods", |
|
"authors": [ |
|
{ |
|
"first": "Efstathios", |
|
"middle": [], |
|
"last": "Stamatatos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Journal of The American Society for Information Science and Technology", |
|
"volume": "60", |
|
"issue": "", |
|
"pages": "538--556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Efstathios Stamatatos. 2009. A survey of modern authorship attribution methods. Journal of The American Society for Information Science and Technology, 60:538-556.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "An exploration of language identification techniques for the dutch folktale database", |
|
"authors": [ |
|
{ |
|
"first": "Dolf", |
|
"middle": [], |
|
"last": "Trieschnigg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djoerd", |
|
"middle": [], |
|
"last": "Hiemstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari\u00ebt", |
|
"middle": [], |
|
"last": "Theune", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franciska", |
|
"middle": [], |
|
"last": "de Jong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Theo", |
|
"middle": [], |
|
"last": "Meder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the LREC workshop on the Adaptation of Language Resources and Tools for Processing Cultural Heritage Objects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dolf Trieschnigg, Djoerd Hiemstra, Mari\u00ebt The- une, Franciska de Jong, and Theo Meder. 2012. An exploration of language identification tech- niques for the dutch folktale database. In Pro- ceedings of the LREC workshop on the Adapta- tion of Language Resources and Tools for Pro- cessing Cultural Heritage Objects.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Standard English: What it isnt", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Trudgill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Standard English: The widening debate", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "117--128", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Trudgill. 1999. Standard English: What it isnt. In Tony Bex and Richard J. Watts, editors, Standard English: The widening debate, pages 117-128. Routledge, London.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "International English: A guide to varieties of Standard English", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Trudgill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Hannah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Trudgill and Jean Hannah. 2008. Interna- tional English: A guide to varieties of Standard English. Hodder Education, London, fifth edi- tion.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "New tweets per second record", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Twitter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Twitter. 2013. New tweets per second record, and how! https://blog.twitter.com/ 2013/new-tweets- per-second-record-and-how. Re- trieved 19 August 2013.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Simple supervised document geolocation with geodesic grids", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Wing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "955--964", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin P. Wing and Jason Baldridge. 2011. Simple supervised document geolocation with geodesic grids. In Proceedings of the 49th An- nual Meeting of the Association for Computa- tional Linguistics: Human Language Technolo- gies -Volume 1, HLT '11, pages 955-964. Asso- ciation for Computational Linguistics, Portland, Oregon, USA.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Contrastive analysis and native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Sze-Meng Jojo", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Australasian Language Technology Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "53--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sze-Meng Jojo Wong and Mark Dras. 2009. Con- trastive analysis and native language identifi- cation. In Proceedings of the Australasian Language Technology Workshop 2009 (ALTW 2009), pages 53-61. Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Exploiting parse structures for native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Sze-Meng Jojo", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1600--1610", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sze-Meng Jojo Wong and Mark Dras. 2011. Ex- ploiting parse structures for native language identification. In Proceedings of the 2011 Con- ference on Empirical Methods in Natural Lan- guage Processing (EMNLP 2011), pages 1600- 1610. Association for Computational Linguis- tics, Edinburgh, Scotland, UK.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Exploring adaptor grammars for native language identification", |
|
"authors": [ |
|
{ |
|
"first": "Sze-Meng Jojo", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "699--709", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sze-Meng Jojo Wong, Mark Dras, and Mark John- son. 2012. Exploring adaptor grammars for native language identification. In Proceedings of the Joint Conference on Empirical Meth- ods in Natural Language Processing and Com- putational Natural Language Learning 2012 (EMNLP-CoNLL 2012), pages 699-709. Asso- ciation for Computational Linguistics, Jeju Is- land, Korea.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "A reexamination of text categorization methods", |
|
"authors": [ |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the 22nd annual international ACM SIGIR conference on Research and development in information retrieval -SIGIR '99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiming Yang and Xin Liu. 1999. A re- examination of text categorization methods. In Proceedings of the 22nd annual international ACM SIGIR conference on Research and de- velopment in information retrieval -SIGIR '99, pages 42-49. ACM Press, New York, USA.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "On sentence-length as a statistical characteristic of style in prose: With application to two cases of disputed authorship", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Udny Yule", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1939, |
|
"venue": "Biometrika", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "363--390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Udny Yule. 1939. On sentence-length as a sta- tistical characteristic of style in prose: With ap- plication to two cases of disputed authorship. Biometrika, 30:363-390.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Arabic dialect identification", |
|
"authors": [ |
|
{ |
|
"first": "Omar", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omar F Zaidan and Chris Callison-Burch. 2012. Arabic dialect identification. Computational Linguistics, 52(1).", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Effective and Scalable Authorship Attribution Using Function Words", |
|
"authors": [ |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Zobel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Asia Information Retrieval Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "174--189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ying Zhao and Justin Zobel. 2005. Effective and Scalable Authorship Attribution Using Func- tion Words. In Asia Information Retrieval Sym- posium, pages 174-189.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF2": { |
|
"text": "Macroaverage F-score for cross-domain learning. For each domain/method combination, a classifier is trained on the union of the 3 non-target domains.", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">Target Domain</td><td/></tr><tr><td>Approach</td><td>NATIONAL</td><td>WEB</td><td colspan=\"2\">WEBGOV TWITTER</td></tr><tr><td/><td>(2-way)</td><td>(3-way)</td><td>(3-way)</td><td>(3-way)</td></tr><tr><td>BASELINE</td><td>0.499</td><td>0.336</td><td>0.328</td><td>0.329</td></tr><tr><td>TEXTCATEGORIZATION</td><td>0.975</td><td>0.762</td><td>0.870</td><td>0.773</td></tr><tr><td>NATIVELID</td><td>0.946</td><td>0.577</td><td>0.708</td><td>0.521</td></tr><tr><td>AUTHORSHIPATTRIB</td><td>0.591</td><td>0.368</td><td>0.489</td><td>0.451</td></tr><tr><td>LANGID</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>GEOLOCATION</td><td>0.861</td><td>0.532</td><td>0.544</td><td>0.316</td></tr><tr><td>VARIANTPAIR</td><td>0.532</td><td>0.359</td><td>0.333</td><td>0.337</td></tr></table>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"html": null, |
|
"content": "<table><tr><td>: Macroaverage F-score for in-domain (supervised) classification for each domain/method combi-</td></tr><tr><td>nation. (We do not have in-domain LANGID results as the method of Lui and Baldwin (2011) specifically</td></tr><tr><td>requires cross-domain training data.)</td></tr></table>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "Macroaverage F-score for pairwise cross-domain learning. Same-domain results", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |