|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:10:20.018387Z" |
|
}, |
|
"title": "Cross-Domain Language Modeling: An Empirical Investigation", |
|
"authors": [ |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Australian National University", |
|
"location": { |
|
"settlement": "Canberra", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sarvnaz", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Australian National University", |
|
"location": { |
|
"settlement": "Canberra", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maciej", |
|
"middle": [], |
|
"last": "Rybinski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Australian National University", |
|
"location": { |
|
"settlement": "Canberra", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhenchang", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Australian National University", |
|
"location": { |
|
"settlement": "Canberra", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Transformer encoder models exhibit strong performance in single-domain applications. However, in a cross-domain situation, using a sub-word vocabulary model results in subword overlap. This is an issue when there is an overlap between sub-words that share no semantic similarity between domains. We hypothesize that alleviating this overlap allows for a more effective modeling of multi-domain tasks; we consider the biomedical and general domains in this paper. We present a study on reducing sub-word overlap by scaling the vocabulary size in a Transformer encoder model while pretraining with multiple domains. We observe a significant increase in downstream performance in the general-biomedical crossdomain from a reduction in sub-word overlap.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Transformer encoder models exhibit strong performance in single-domain applications. However, in a cross-domain situation, using a sub-word vocabulary model results in subword overlap. This is an issue when there is an overlap between sub-words that share no semantic similarity between domains. We hypothesize that alleviating this overlap allows for a more effective modeling of multi-domain tasks; we consider the biomedical and general domains in this paper. We present a study on reducing sub-word overlap by scaling the vocabulary size in a Transformer encoder model while pretraining with multiple domains. We observe a significant increase in downstream performance in the general-biomedical crossdomain from a reduction in sub-word overlap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Contemporary language models are pretrained on massive, linguistically diverse corpora (Lan et al., 2020; Devlin et al., 2019a) . It is not uncommon for these models to excel at benchmark downstream tasks (Wang et al., 2019a) , given the use of contextual representations (Devlin et al., 2019b ) that are trained on a variety of source domains-a term used to describe a distribution of language on a given topic or genre (for example BIOMEDICAL, SCIENTIFIC)-or GENERAL domain. However, the benefit of GENERAL domain pretraining for specialized application is questionable, as applying these language models (Gu et al., 2020) to specialized tasks is worse than using specialized counterparts (Beltagy et al., 2019) . This degradation still occurs after sequential pretraining on specialized domains (Shin et al., 2020) when fine-tuned (updates to pretraining) to downstream tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 105, |
|
"text": "(Lan et al., 2020;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 127, |
|
"text": "Devlin et al., 2019a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 225, |
|
"text": "(Wang et al., 2019a)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 293, |
|
"text": "(Devlin et al., 2019b", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 624, |
|
"text": "(Gu et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 713, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 817, |
|
"text": "(Shin et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We hypothesize some of this degradation lies in the use of a sub-word vocabulary (Si et al., 2019) . Sub-word vocabularies (Sennrich et al., 2016; Wu et al., 2016a,b) allow for efficient modeling of a source language distribution with a limited vocabulary size. However, problematically sub-words can be shared between different words-for example hypotension and hypocritical-with different meanings. This potentially conflates the vector representation of a sub-word (or wordpiece) causing sub-word overlap. When this overlap occurs with sub-words appearing in multiple domain contexts we call this cross-domain sub-word overlap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 98, |
|
"text": "(Si et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 146, |
|
"text": "(Sennrich et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 166, |
|
"text": "Wu et al., 2016a,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
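
{

"text": "To make this concrete, the shared piece can be inspected directly with a general-domain tokenizer. The following is a minimal sketch, assuming the huggingface transformers library and the bert-base-uncased WordPiece vocabulary; the exact segmentation depends on the vocabulary used and the printed pieces are illustrative only:\n\nfrom transformers import AutoTokenizer\n\n# A general-domain WordPiece tokenizer (illustrative choice).\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\nprint(tokenizer.tokenize('hypotension'))   # e.g. ['hypo', '##tension']\nprint(tokenizer.tokenize('hypocritical'))  # e.g. ['hypo', '##critical'], sharing the 'hypo' piece\n\nWhen a shared piece such as 'hypo' appears in both biomedical and general contexts, its single embedding must serve both senses, which is the cross-domain sub-word overlap described above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},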
|
{ |
|
"text": "As a pilot empirical study, we investigate reducing cross-domain sub-word overlap, by increasing vocabulary size, in language models pretrained in the GENERAL and BIOMEDICAL cross-domain. To evaluate the effect of sub-word overlap, general and biomedical domain benchmarks are used in this study as the task distribution includes different linguistic phenomena such as grammar, sentiment, textual similarity, natural language inference (Wang et al., 2019a) . Interestingly, we find that disjoint sub-word vocabulary sets are not ideal. Some sub-word overlap is necessary and unavoidable, and a different level of overlap is ideal for each target domain. We also find a positive trend occurs when reducing cross-domain sub-word overlap, suggesting that there is a trade-off depending on the target downstream task and domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 436, |
|
"end": 456, |
|
"text": "(Wang et al., 2019a)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To better understand the results, we look at the impact of the pretraining data domain on downstream benchmark performance. Surprisingly, we found that inclusion of the general domain with a specialized domain improves downstream performance for that specialized domain's tasks, but not the other way around. This suggests that specialized domains should be trained in tandem with a general one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contribution is a pilot study that investigates a pretraining strategy to reduce cross-domain subword overlap between GENERAL and BIOMEDICAL domains. We train cross-domain language models with varied vocabulary sizes and evaluate them on downstream classification tasks. We show that a significant improvement can be achieved on two benchmark datasets ((Wang et al., 2019a) , (Peng et al., 2019) ) when reducing overlap. Further experiments point to the importance of selecting appropriate pretraining data for specialized domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 356, |
|
"end": 377, |
|
"text": "((Wang et al., 2019a)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 399, |
|
"text": "(Peng et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We discuss strategies from the literature to adapt the GENERAL domain language model, in particular a Transformer (Vaswani et al., 2017) encoder (Devlin et al., 2019b) , to a specialized domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 136, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 167, |
|
"text": "(Devlin et al., 2019b)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Domain-specific pretraining Many studies have adapted BERT, a popular Transformer encoder, to a specialized domain. However, as BERT was pretrained with a general domain sub-word vocabulary and trained on general domain data (BookCorpus and Wikipedia), domain adaptation is needed. For example, in the BIOMEDICAL domain, BioBERT (Lee et al., 2019) benefited from additional pretraining of the pretrained BERT model on academic biomedical corpora (PubMed Open Access and MEDLINE), showing a marked improvement on downstream biomedical tasks. DAPT (Gururangan et al., 2020) showed similar improvements.", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 347, |
|
"text": "(Lee et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 571, |
|
"text": "DAPT (Gururangan et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "However, BioBERT's approach was less effective in clinical applications; thus, Clinical-BERT (Alsentzer et al., 2019) was trained on domain-specific clinical corpora to improve upon downstream clinical tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 117, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Similarly, Blue-BERT (Peng et al., 2019) was pretrained on a combination of domain-specific data, including PubMed abstracts and clinical notes. However, these approaches were only specialized for narrow task distributions rather than the entire BIOMEDI-CAL domain (Nguyen et al., 2019) and were trained sequentially (general to biomedical) rather than combined initially, which may suffer from effects such as catastrophic forgetting (McCloskey and Cohen, 1989) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 40, |
|
"text": "(Peng et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 286, |
|
"text": "(Nguyen et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 462, |
|
"text": "(McCloskey and Cohen, 1989)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Vocabulary Insertion Other studies considered extending a Transformer-based model's vocabulary without repeating the expensive pretraining step. In particular, one study replaced unused vocabulary elements with medical suffixes and prefixes (Nguyen et al., 2019) . Additional pretraining steps were used so that the model learned the new vocabulary. They found that vocabulary insertion did not help as much as an increase in pretraining data. A similar observation is found by Shin et al. (2020) and Beltagy et al. (2019) . However, another study using a domain-specific tokenizer for vocabulary insertion (Tai, 2019) found improvements in the German legal domain. However, improvements from vocabulary insertion are minimal, as there is still an interaction between the original vocabulary embeddings and the embeddings added during the fine-tuning step, resulting in sub-word overlap. Wang et al. (2019b) proposes an enrichment of the BERT vocabulary by using embeddings from other models and learns a projection to the BERT embedding space in a multilingual setting. exBERT (Tai et al., 2020) extends the embedding dimension with domain-specific vocabulary. The model's original weights and embeddings are frozen during extended vocabulary training. Within the same class of approaces, (Poerner et al., 2020) propose a method where general domain embeddings are aligned with target-domain-specific word2vec embeddings. However, vocabulary insertion approaches circumvent the pretraining stage with domain-specific data which may potentially be more important than a vocabulary change (Shin et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 262, |
|
"text": "(Nguyen et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 496, |
|
"text": "Shin et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 522, |
|
"text": "Beltagy et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 618, |
|
"text": "(Tai, 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 907, |
|
"text": "Wang et al. (2019b)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1078, |
|
"end": 1096, |
|
"text": "(Tai et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1290, |
|
"end": 1312, |
|
"text": "(Poerner et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1588, |
|
"end": 1607, |
|
"text": "(Shin et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Domain-specific vocabulary pretraining An extension to these methods is to pretrain on a target domain corpus with a custom vocabulary. SciB-ERT (Beltagy et al., 2019) showed that pretraining from scratch with a domain-specific vocabulary is better than a general-purpose vocabulary despite having fewer combined pretraining examples. Similarly, BioMegatron (Shin et al., 2020) showed that a larger custom vocabulary is useful for biomedical named entity recognition tasks and that a domainspecific vocabulary is more valuable than a larger model. They also show that a larger vocabulary size caused a reduction in over-segmentation, a problem that occurs when using a general vocabulary on specialized tasks (Chalkidis et al., 2020 ) that increases sub-word overlap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 167, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 377, |
|
"text": "(Shin et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 732, |
|
"text": "(Chalkidis et al., 2020", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our work is a pilot study that extends upon domain-specific vocabulary pretraining to investigate cross-domain sub-word modeling. We pretrain models with varying vocabulary sizes to reduce sub-word overlap. In particular, we focus on cross-domain pretraining, which was previously unexplored in vocabulary experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We use the combined English snapshot of Wikipedia (a proxy for the general domain) and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "PubMed Open Access Full-Text corpora (biomedical domain) taken on the 1st of April 2020 for pretraining the language models and tokenizers. The PubMed corpus, consisting of 8.3 billion tokens, is preprocessed to remove references, while the Wikipedia corpus, consisting of 2.0 billion tokens, is extracted and cleaned with wikiextractor (Attardi, 2015). We use this pretraining data combination as a cross-domain proxy of the GENERAL and BIOMEDICAL domain. We use the training and validation sets of the GLUE benchmark (Wang et al., 2019a) to fine-tune our models for general domain benchmarking. Likewise, we use the publicly available subset of the BLUE tasks collection for the biomedical domain (Peng et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 519, |
|
"end": 539, |
|
"text": "(Wang et al., 2019a)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 718, |
|
"text": "(Peng et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Tasks", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We perform pretraining with a cross-domain corpus with the ALBERT model, which results in a high degree of cross-domain sub-word overlap. In addition, we experiment with models that have different vocabulary sizes (5000 to 100,000), each with a varying degree of sub-word overlap during pretraining. In Transformer models, the embedding dimension is coupled with the model's hidden dimension, causing the vocabulary size to control the model size-a larger vocabulary size exponentially increases the model's size. To remedy this, we use the ALBERT model (Lan et al., 2020) , which projects the embedding dimension to a latent vocabulary dimension before projecting it to the model's hidden dimension. This projection allows scaling of the vocabulary size without significantly impacting the model's size.", |
|
"cite_spans": [ |
|
{ |
|
"start": 554, |
|
"end": 572, |
|
"text": "(Lan et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
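
{

"text": "A minimal PyTorch sketch of this factorized embedding, written as an illustrative re-implementation rather than the actual ALBERT code (the dimensions are example values):\n\nimport torch.nn as nn\n\nclass FactorizedEmbedding(nn.Module):\n    # Token embeddings of size e are projected up to the hidden size H,\n    # so embedding parameters grow as V*e + e*H instead of V*H.\n    def __init__(self, vocab_size, embed_dim, hidden_dim):\n        super().__init__()\n        self.word_embeddings = nn.Embedding(vocab_size, embed_dim)\n        self.projection = nn.Linear(embed_dim, hidden_dim, bias=False)\n\n    def forward(self, token_ids):\n        return self.projection(self.word_embeddings(token_ids))\n\n# With V = 100,000, e = 128 and H = 768 this is roughly 12.9M embedding parameters,\n# versus roughly 76.8M if the embedding size were tied to the hidden size.\n\nThis is why scaling |V| in our experiments changes the tokenization granularity far more than it changes the parameter count.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "4"

},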
|
{ |
|
"text": "Task performance and vocabulary size After pretraining, for each vocabulary size, we then evaluate our language models on downstream BLUE and GLUE benchmark datasets to determine how downstream performance is affected by the amount of sub-word overlap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Determining Sub-word Overlap To determine the amount of sub-word overlap in relation to vocabulary size, we tokenize each general domain and biomedical task in GLUE and BLUE for each vocabulary size and compute the Jaccard index (Jaccard, 1912). The GLUE and BLUE tasks, are used as a cross-domain proxy between the GENERAL and BIOMEDICAL domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
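
{

"text": "A minimal sketch of the overlap computation; the two token sets shown are hypothetical placeholders for the sub-word sets obtained by tokenizing the GLUE and BLUE task texts:\n\ndef jaccard_index(a, b):\n    # |A intersect B| / |A union B| between two sets of sub-words.\n    return len(a & b) / len(a | b)\n\nglue_subwords = {'hypo', '##tension', 'the', 'good'}   # hypothetical\nblue_subwords = {'hypo', '##glycemia', 'the', 'dose'}  # hypothetical\nprint(jaccard_index(glue_subwords, blue_subwords))     # 2 shared / 6 total = 0.33...",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "4"

},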
|
{ |
|
"text": "Experimental Setup For each model (vocabulary size |V |), we train a separate tokenizer using Byte-Pair Encoding (Sennrich et al., 2016) . We use masked language modeling to train the largest model, ALBERT |V =100,000| , on the combined corpora of Wikipedia and PubMed for two weeks using four V100 GPUs with an effective batch size of 256. We use the LAMB (You et al., 2020) optimizer and a maximum model sequence length of 512. All other hyperparameters are left as default, as described by Lan et al. (2020) . For each model, we select the checkpoint such that validation performance (perplexity) is equal for all models. We then evaluate each model on both general domain and biomedical benchmark tasks. Specifically, we fine-tune each model for a maximum of 15 epochs for all the biomedical tasks, taking the best model on the validation set for inference over the test set. For the general domain tasks, to reduce overfitting (false convergence), we train each task for five epochs and report the validation performance as the test set labels are not publicly available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 136, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 375, |
|
"text": "(You et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 510, |
|
"text": "Lan et al. (2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
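
{

"text": "The per-vocabulary-size tokenizers can be trained with the huggingface tokenizers library. The following is a minimal sketch using its byte-level BPE implementation; the corpus file paths are hypothetical and other BPE variants would work analogously:\n\nfrom tokenizers import ByteLevelBPETokenizer\n\ntokenizer = ByteLevelBPETokenizer()\ntokenizer.train(\n    files=['wikipedia.txt', 'pubmed.txt'],  # hypothetical plain-text exports of the corpora\n    vocab_size=100_000,                     # repeated for each |V| in the study\n    min_frequency=2,\n)\ntokenizer.save_model('vocab-100k')          # writes the learned vocabulary and merge rules",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "4"

},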
|
{ |
|
"text": "However, scaling vocabulary size itself can lead to performance increases (Shin et al., 2020) . Hence, we use the checkpoint where validation performance for masked language modeling is equal across all models; meaning that all models have similar capacity for language modeling with the only difference being vocabulary size during downstream updates via fine-tuning. The increase in parameter count due to vocabulary embeddings is negligible as the embeddings are all projected into the same sized latent dimension before being used by the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 93, |
|
"text": "(Shin et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The classification layer used is created for each individual task and is not shared by any model. We use the default classification layer, with the correct label output layer as provided by the huggingface library (Wolf et al., 2019 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 232, |
|
"text": "(Wolf et al., 2019", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We trained masked language models of varying vocabulary sizes, each with its own degree of subword overlap and evaluate on downstream general and biomedical language understanding benchmarks. We found that cross-domain sub-word overlap reduction benefited the cross-domain between the general (Table 1 ) and biomedical domain (Figure 1) as sub-word overlap decreased (Table 2) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 301, |
|
"text": "(Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 336, |
|
"text": "(Figure 1)", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 376, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In terms of sub-word overlap, we find that the Jaccard index decreases sharply with vocabulary size ( Table 2) , indicating that biomedical and general domain tasks share common elements. This overlap decreases rapidly, especially at larger vocabularies (26.6% overlap at |V | = 100, 000). A similar overlap percentage is reported by Beltagy et al. (2019) when measuring overlap between scientific and general domain vocabulary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 334, |
|
"end": 355, |
|
"text": "Beltagy et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 110, |
|
"text": "Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We also report the sub-word overlap proportional to vocabulary size ( Table 2 ) and observe that it also falls sharply in a similar pattern. Although subword overlap proportion decreases, at least 87.3% of the vocabulary is still used, meaning vocabulary elements are not underused. Generally, reducing the overlap from approximately 60% Jaccard Index (|V | < 30000) to 40% (|V | \u2265 70000) increases effectiveness in the biomedical domain while producing small improvements in the general domain ( Table 1 ). This indicates that reducing sub-word overlap does not reduce vocabulary usage and that downstream fine-tuning with a larger vocabulary size alleviates overlap and improves performance. However, we find that few tasks perform best with a maximal separation of the biomedical and general domain vocabulary, with the only tasks performing well are CoLA (grammar detection), MedNLI (inference classification) and DDI (relation extraction). This suggests that a degree of overlap in a cross-domain is beneficial and that these domains share similarities. This shared similarity is also observed by Toews and Holland (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1102, |
|
"end": 1126, |
|
"text": "Toews and Holland (2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 77, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 504, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "BLUE tasks seem to benefit from a larger separation of vocabularies, as suggested by an improved F1-score with increased vocabulary size (|V |) in Figure 1 . However, this benefit is less significant for GLUE tasks, as validation model selection (used in BLUE) could not be applied.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 155, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We find that GLUE results are worse when using combined (PubMed+Wiki) rather than individual pretraining corpora (see Table 3 ), while interestingly, the opposite appears to be true for BLUE. However, both benchmarks together show that the pretraining data and a larger vocabulary size helps in a cross-domain setting. Though it does not significantly hurt performance in the general domain, it significantly improves performance in the biomedical domain. Interestingly, pretraining with PubMed alone performed worse than pretraining with the Wikipedia corpus. A detailed table of results can be found in Appendix 5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 125, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We observe that inference tasks fared better with a larger vocabulary (Table 4) , indicating that inference tasks are more affected by sub-word overlap. For textual entailment (RTE) and paraphrase detection (QQP), larger |V | had no positive effect. For SST-B (Textual Similarity) the model overfits as data size is small compared to the other tasks. Furthermore, while the default |V | in transformers is 30,000, only a few tasks perform well at this size, suggesting that |V | is an important consideration during pretraining depending on downstream task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 79, |
|
"text": "(Table 4)", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "This study only considers the biomedical and general domains; we hypothesize these principles can be applied to other domains, such as multilingual machine translation. One particular observation relevant to our setup is that the general domain corpus is smaller than that of the target domain, which should also be considered when extrapolating our findings. Another limitation is that training of the language models was not performed to completion. However, language modeling effectiveness was fixed for a fair comparison. These limitations will be explored in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We are also aware that the fixed perplexity does not fully disentangle the impacts of vocabulary overlap and vocabulary size on the downstream effectiveness. We plan to extend our study with further experiments to ensure the robustness of results presented here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "When applying general domain Transformer language models to specialized ones, the use of sub-word modeling results causes sub-word overlap leading to decreased performance. We showed that increasing the vocabulary size of the model alleviates this performance penalty and improves downstream task performance on GENERAL and BIOMEDICAL benchmarks. Furthermore, we show that specialized domains improve significantly from a combination of specialized and general domain pretraining data. Our work is a pilot study into improving downstream performance on specialized domains with potential application in crossdomain tasks. In the future, we would extend this study to other applications such as machine translation and cross-lingual language modeling. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Prior to pretraining, when building the wordpiece tokenizer. We estimated the upper limit of unique vocabulary tokens based on the assumptions that:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining vocabulary size", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "(1) each corpora is english;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Determining vocabulary size", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "(2) each corpora shares no tokens; and, (3) the corpora's token frequency follows a zipf distribution (Zipf, 1936) . From, (El-legaRd, 1960) (Table 5 ), we calculated the upper limit for the vocabulary for each corpus given our second assumption and summed the result which gives a combined vocabulary size of approximately 90,000. We extend the vocabulary by an extra 10,000 to determine if our vocabulary size was sufficient.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 114, |
|
"text": "(Zipf, 1936)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 140, |
|
"text": "(El-legaRd, 1960)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 149, |
|
"text": "(Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Determining vocabulary size", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We train separate models for each corpora, namely Wikipedia, PubMed and the combined corpora of Wikipedia and PubMed. We use the same training procedure as in our main experiments, but at a fixed vocabulary size of 40,000.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretraining Data Experiments", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "We use the standard GLUE benchmark tasks and the BLUE language understanding tasks. We describe the BLUE tasks as follows: Relation Extraction DDI (Herrero-Zazo et al., 2013) , is a medical corpus consisting of texts from the Drugbank database and MeEDLINE abstracts annotated by experts for drug-drug interactions. Chemprot (Krallinger et al., 2017) , a classification task for five different chemical-protein interaction categories from PubMed abstracts. Multilabel classification Hallmarks of Cancers (HoC) (Baker et al., 2015) , a corpus of PubMed abstracts labeled with one or more of ten cancers. Inference For inference-based tasks, we use Medical Natural Language Inference (MedNLI) (Johnson et al., 2016a) created from MIMIC-III and annotated by radiologists with entailment, neutral and contradiction labels for each premise-hypothesis pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 174, |
|
"text": "(Herrero-Zazo et al., 2013)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 350, |
|
"text": "(Krallinger et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 530, |
|
"text": "(Baker et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Downstream Tasks", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "Metrics Generally, for the BLUE tasks, we use macro averaged F1-score, except for HoC where we report the micro averaged F1-score similar to that described in Peng et al. (2019) . Evaluation of the GLUE benchmark is based on GLUE's official metrics (Wang et al., 2019a) : F1-score for QQP and MRPC, Pearson and Spearman correlation for STS-B, Matthew's Correlation for CoLA, which measures binary agreement between prediction and observed from -1 (total disagreement) and +1 (perfect prediction), and accuracy for the remaining tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 177, |
|
"text": "Peng et al. (2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 269, |
|
"text": "(Wang et al., 2019a)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Downstream Tasks", |
|
"sec_num": "8.2" |
|
}, |
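
{

"text": "A minimal sketch of these metrics with scikit-learn; the label vectors are hypothetical:\n\nfrom sklearn.metrics import f1_score, matthews_corrcoef\n\ny_true = [1, 0, 1, 1, 0]  # hypothetical gold labels\ny_pred = [1, 0, 0, 1, 0]  # hypothetical predictions\nprint(f1_score(y_true, y_pred, average='macro'))  # macro-averaged F1, as for most BLUE tasks\nprint(matthews_corrcoef(y_true, y_pred))          # Matthews correlation in [-1, +1], as for CoLA",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Downstream Tasks",

"sec_num": "8.2"

},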
|
{ |
|
"text": "We describe the intuition behind the reduction in sub-word overlap in more detail here and discuss some results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Minimizing Sub-word overlap", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Sub-word overlap is a phenomena wherein tokens in a sub-word model will exhibit a polysemous, though it is closer to homonymy, effect where subwords will be shared by words that have different meanings. To combat this, we scale the vocabulary size, such that fewer sub-words are shared by different words. Chalkidis et al. (2020) also notes that in specialized contexts, general domain vocabularies tend to over-segment specialized terminology, such as diseases or medications. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 306, |
|
"end": 329, |
|
"text": "Chalkidis et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definitions", |
|
"sec_num": "9.1" |
|
}, |
|
{ |
|
"text": "We used Jaccard Index (Jaccard, 1912) , to measure the set overlap between the GLUE and BLUE tasks. We found a decreasing trend in overlap when increasing vocabulary size, which was correlated with an increase in downstream task performance. We found that as vocabulary size increased, more vocabulary elements were used in terms of absolute quantities for both the GLUE and BLUE tasks. This could be attributed to fewer words being broken up into sub-word units as vocabulary size increases (Chalkidis et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 37, |
|
"text": "(Jaccard, 1912)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 516, |
|
"text": "(Chalkidis et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring Sub-word Overlap", |
|
"sec_num": "9.2" |
|
}, |
|
{ |
|
"text": "For each task, we used tokenized based on whitespace to approximate the vocabulary size needed to represent all words in at task (Table 7) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 138, |
|
"text": "(Table 7)", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Vocabulary Sizes", |
|
"sec_num": "9.3" |
|
}, |
|
{ |
|
"text": "By expanding the vocabulary dimension, fewer overlaps will occur which is shown in Table 2 as a proportion of the overall vocabulary size and Jaccard Index. Though, in absolute terms the number of overlaps increase, suggesting that some overlap between domains does exist and the overlap percentage being approached is similar to the one found in Beltagy et al. (2019) . This is further reflected in Table 3 where the GLUE tasks perform similarly when pretrained on either PubMED or Wikipedia. Suggesting that the pretraining data on its own has enough data to pretrain a general domain model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 368, |
|
"text": "Beltagy et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 90, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 407, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "9.4" |
|
}, |
|
{ |
|
"text": "Although this not hold true for the specialized domain, which requires both the general domain and specialized domain. Our intuition for pretraining on both Wikipedia and PubMED simultaneously is to reduce the catastrophic forgetting effect (Mc-Closkey and Cohen, 1989) , which may be present in models such as BioBERT (Lee et al., 2019) , and ClinicalBERT (Alsentzer et al., 2019) given that the models are trained sequentially with medical corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 269, |
|
"text": "(Mc-Closkey and Cohen, 1989)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 337, |
|
"text": "(Lee et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 381, |
|
"text": "(Alsentzer et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "9.4" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Vincent is supported by the Australian Research Training Program and the CSIRO Research Office Postgraduate Scholarship. This work is funded by the CSIRO Precision Health Future Science Platform.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Publicly available clinical BERT embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Alsentzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Hung", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jindi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--78", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-1909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clini- cal BERT embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Workshop, pages 72-78, Minneapolis, Minnesota, USA. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatic semantic classification of scientific literature according to the hallmarks of cancer", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilona", |
|
"middle": [], |
|
"last": "Silins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yufan", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Imran", |
|
"middle": [], |
|
"last": "Ali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "H\u00f6gberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulla", |
|
"middle": [], |
|
"last": "Stenius", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Bioinformatics", |
|
"volume": "32", |
|
"issue": "3", |
|
"pages": "432--440", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/bioinformatics/btv585" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Baker, Ilona Silins, Yufan Guo, Imran Ali, Jo- han H\u00f6gberg, Ulla Stenius, and Anna Korhonen. 2015. Automatic semantic classification of scien- tific literature according to the hallmarks of cancer. Bioinformatics, 32(3):432-440.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "SciB-ERT: A pretrained language model for scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3606--3611", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1371" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciB- ERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3606- 3611, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. An empirical study on large-scale multi-label text classification including few and zero-shot labels", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manos", |
|
"middle": [], |
|
"last": "Fergadiotis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sotiris", |
|
"middle": [], |
|
"last": "Kotitsas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7503--7515", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.607" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Manos Fergadiotis, Sotiris Kotitsas, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. An empirical study on large-scale multi-label text classification including few and zero-shot labels. In Proceedings of the 2020 Conference on Empirical Methods in Natural Lan- guage Processing, pages 7503-7515, Online.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019a. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019b. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In NAACL-HLT, pages 4171-4186, Minneapolis, MN.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Estimating vocabulary size", |
|
"authors": [ |
|
{ |
|
"first": "Alvar", |
|
"middle": [], |
|
"last": "Ellegard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1960, |
|
"venue": "", |
|
"volume": "16", |
|
"issue": "", |
|
"pages": "219--244", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1080/00437956.1960.11659728" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alvar EllegaRd. 1960. Estimating vocabulary size. 16:219-244.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Jianfeng Gao, and Hoifung Poon. 2020. Domainspecific language model pretraining for biomedical natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Tinn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Lucas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoto", |
|
"middle": [], |
|
"last": "Usuyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Gu, Robert Tinn, Hao Cheng, Michael Lucas, Naoto Usuyama, Xiaodong Liu, Tristan Naumann, Jianfeng Gao, and Hoifung Poon. 2020. Domain- specific language model pretraining for biomedical natural language processing. Computing Research Repository.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Don't stop pretraining: Adapt language models to domains and tasks", |
|
"authors": [ |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8342--8360", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.740" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The ddi corpus: An annotated corpus with pharmacological substances and drug-drug interactions", |
|
"authors": [ |
|
{ |
|
"first": "Mar\u00eda", |
|
"middle": [], |
|
"last": "Herrero-Zazo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Segura-Bedmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paloma", |
|
"middle": [], |
|
"last": "Mart\u00ednez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Declerck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Journal of Biomedical Informatics", |
|
"volume": "46", |
|
"issue": "5", |
|
"pages": "914--920", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.jbi.2013.07.011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mar\u00eda Herrero-Zazo, Isabel Segura-Bedmar, Paloma Mart\u00ednez, and Thierry Declerck. 2013. The ddi corpus: An annotated corpus with pharmacological substances and drug-drug interactions. Journal of Biomedical Informatics, 46(5):914 -920.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The distribution of the flora in the alpine zone", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Jaccard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1912, |
|
"venue": "New Phytologist", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "37--50", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://nph.onlinelibrary.wiley.com/doi/abs/10.1111/j.1469-8137.1912.tb05611.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Jaccard. 1912. The distribution of the flora in the alpine zone.1. New Phytologist, 11:37-50.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "MIMIC-III, a freely accessible critical care database", |
|
"authors": [ |
|
{ |
|
"first": "Alistair", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Pollard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Wei", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengling", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Ghassemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Moody", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Celi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Scientific Data", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1038/sdata.2016.35" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair Johnson, Tom Pollard, Lu Shen, Li-wei Lehman, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Anthony Celi, and Roger Mark. 2016a. MIMIC-III, a freely ac- cessible critical care database. Scientific Data, 3:160035.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Mimic-iii, a freely accessible critical care database", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Alistair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Pollard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H Lehman", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengling", |
|
"middle": [], |
|
"last": "Li-Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Ghassemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Moody", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [ |
|
"Anthony" |
|
], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger G", |
|
"middle": [], |
|
"last": "Celi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair EW Johnson, Tom J Pollard, Lu Shen, H Lehman Li-wei, Mengling Feng, Moham- mad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016b. Mimic-iii, a freely accessible critical care database. Scientific data, 3:160035.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Overview of the biocreative vi chemical-protein interaction track", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Rabal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Akhondi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "P\u00e9rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Santamar\u00eda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gael", |
|
"middle": [ |
|
"P\u00e9rez" |
|
], |
|
"last": "Rodr\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Tsatsaronis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Ander Intxaurrondo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Umesh", |
|
"middle": [], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Nandal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Buel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marleen", |
|
"middle": [], |
|
"last": "Chandrasekhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rodenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marius", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Laegreid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Doornenbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Oyarz\u00e1bal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Louren\u00e7o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Valencia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of BioCreative", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Krallinger, O. Rabal, S. A. Akhondi, M. P\u00e9rez, J. Santamar\u00eda, Gael P\u00e9rez Rodr\u00edguez, G. Tsatsaronis, Ander Intxaurrondo, J. A. L\u00f3pez, Umesh Nandal, E. V. Buel, A. Chandrasekhar, Marleen Rodenburg, A.G Laegreid, Marius A. Doornenbal, J. Oyarz\u00e1bal, A. Louren\u00e7o, and A. Valencia. 2017. Overview of the biocreative vi chemical-protein interaction track. In Proceedings of BioCreative, pages 141-146.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 8th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2020. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. In Proceed- ings of the 8th International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "BioBERT: A pretrained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: A pre- trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Catastrophic interference in connectionist networks: The sequential learning problem", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Mccloskey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neal", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "Psychology of Learning and Motivation", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "109--165", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0079-7421(08)60536-8" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael McCloskey and Neal J. Cohen. 1989. Catas- trophic interference in connectionist networks: The sequential learning problem. volume 24 of Psy- chology of Learning and Motivation, pages 109-165. Academic Press.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Investigating the effect of lexical segmentation in transformer-based models on medical datasets", |
|
"authors": [ |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarvnaz", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenchang", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 17th Annual Workshop of the Australasian Language Technology Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "165--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vincent Nguyen, Sarvnaz Karimi, and Zhenchang Xing. 2019. Investigating the effect of lexical seg- mentation in transformer-based models on medical datasets. In Proceedings of the 17th Annual Work- shop of the Australasian Language Technology As- sociation, pages 165-171, Sydney, Australia. Aus- tralasian Language Technology Association.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets", |
|
"authors": [ |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shankai", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Biomedical Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yifan Peng, Shankai Yan, and Zhiyong Lu. 2019. Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets. In Proceedings of the Workshop on Biomedical Natural Language Process- ing, pages 58-65, Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Inexpensive domain adaptation of pretrained language models: Case studies on biomedical NER and covid-19 QA", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Poerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulli", |
|
"middle": [], |
|
"last": "Waltinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1482--1490", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.134" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nina Poerner, Ulli Waltinger, and Hinrich Sch\u00fctze. 2020. Inexpensive domain adaptation of pretrained language models: Case studies on biomedical NER and covid-19 QA. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1482-1490, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "BioMegatron: Larger biomedical domain language model", |
|
"authors": [ |
|
{ |
|
"first": "Hoo-Chang", |
|
"middle": [], |
|
"last": "Shin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evelina", |
|
"middle": [], |
|
"last": "Bakhturina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raul", |
|
"middle": [], |
|
"last": "Puri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mostofa", |
|
"middle": [], |
|
"last": "Patwary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Shoeybi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raghav", |
|
"middle": [], |
|
"last": "Mani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4700--4706", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.379" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoo-Chang Shin, Yang Zhang, Evelina Bakhturina, Raul Puri, Mostofa Patwary, Mohammad Shoeybi, and Raghav Mani. 2020. BioMegatron: Larger biomedical domain language model. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4700-4706, Online.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Enhancing clinical concept extraction with contextual embedding", |
|
"authors": [ |
|
{ |
|
"first": "Yuqi", |
|
"middle": [], |
|
"last": "Si", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kirk", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of the American Medical Informatics Association : JAMIA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuqi Si, J. Wang, H. Xu, and Kirk Roberts. 2019. En- hancing clinical concept extraction with contextual embedding. Journal of the American Medical Infor- matics Association : JAMIA.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Effects of inserting domain vocabulary and fine-tuning bert for german legal language", |
|
"authors": [ |
|
{ |
|
"first": "Chin Man Yeung", |
|
"middle": [], |
|
"last": "Tai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin Man Yeung Tai. 2019. Effects of inserting do- main vocabulary and fine-tuning bert for german le- gal language. Master's thesis, University of Twente, Netherlands.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "exBERT: Extending pre-trained models with domain-specific vocabulary under constrained training resources", |
|
"authors": [ |
|
{ |
|
"first": "Wen", |
|
"middle": [], |
|
"last": "Tai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Kung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Comiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang-Fu", |
|
"middle": [], |
|
"last": "Kuo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1433--1439", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.129" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen Tai, H. T. Kung, Xin Dong, Marcus Comiter, and Chang-Fu Kuo. 2020. exBERT: Extending pre-trained models with domain-specific vocabulary under constrained training resources. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1433-1439, Online. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Determining domain-specific differences of polysemous words using context information", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Toews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leif", |
|
"middle": [], |
|
"last": "Van Holland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th International Working Conference on Requirement Engineering: Foundation for Software Quality", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Toews and Leif Van Holland. 2019. Deter- mining domain-specific differences of polysemous words using context information. In Proceedings of the 25th International Working Conference on Requirement Engineering: Foundation for Software Quality.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Computing Research Repository, abs/1706.03762.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "the Proceedings of International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--355", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019a. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In In the Proceedings of International Conference on Learn- ing Representations, pages 353-355, Brussels, Bel- gium.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving pre-trained multilingual model with vocabulary expansion", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dian", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianshu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "316--327", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K19-1030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Wang, Dian Yu, Kai Sun, Jianshu Chen, and Dong Yu. 2019b. Improving pre-trained multilin- gual model with vocabulary expansion. In Proceed- ings of the 23rd Conference on Computational Nat- ural Language Learning (CoNLL), pages 316-327, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. Computing Research Repository.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Klingner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apurva", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaobing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshikiyo", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideto", |
|
"middle": [], |
|
"last": "Kazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Kurian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, \u0141ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Ja- son Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016a. Google's Neural Machine Translation Sys- tem: Bridging the Gap between Human and Ma- chine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. Computing Research Repository", |
|
"authors": [ |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Klingner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apurva", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaobing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshikiyo", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideto", |
|
"middle": [], |
|
"last": "Kazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Kurian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.08144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, \u0141ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Ja- son Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016b. Google's Neural Machine Translation Sys- tem: Bridging the Gap between Human and Ma- chine Translation. Computing Research Repository, page arXiv:1609.08144.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Large batch optimization for deep learning: Training bert in 76 minutes", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sashank", |
|
"middle": [], |
|
"last": "Reddi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Hseu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjiv", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinadh", |
|
"middle": [], |
|
"last": "Bhojanapalli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Demmel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Keutzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cho-Jui", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 7th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. 2020. Large batch optimization for deep learning: Training bert in 76 minutes. In Proceedings of the 7th International Conference on Learning Represen- tations, Online.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "The Psychobiology of Language", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Zipf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1936, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Zipf. 1936. The Psychobiology of Language. London, Routledge.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Evaluation of the biomedical tasks against varied |V |. A bold blue circle indicates the peak of the curve.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"text": ").", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"7\">|V | CoLA MNLI MRPC QNLI QQP RTE SST-2 STS-B WNLI</td></tr><tr><td>5000 13.7</td><td>64.9</td><td>79.6</td><td>64.3</td><td>75.7 56.7 78.0</td><td>17.7</td><td>53.5</td></tr><tr><td>10000 12.8</td><td>70.1</td><td>79.3</td><td>67.4</td><td>79.9 51.3 80.5</td><td>19.9</td><td>33.8</td></tr><tr><td>20000 9.30</td><td>70.8</td><td>78.6</td><td>78.8</td><td>81.5 51.6 83.4</td><td>61.0</td><td>46.5</td></tr><tr><td>30000 20.7</td><td>70.6</td><td>78.4</td><td>78.8</td><td>81.3 54.9 83.1</td><td>63.7</td><td>56.3</td></tr><tr><td>40000 14.8</td><td>71.2</td><td>80.9</td><td>78.7</td><td>80.0 54.5 82.3</td><td>21.3</td><td>43.7</td></tr><tr><td>50000 15.3</td><td>71.1</td><td>79.2</td><td>79.5</td><td>80.9 54.5 83.4</td><td>28.9</td><td>46.5</td></tr><tr><td>60000 16.9</td><td>71.4</td><td>77.3</td><td>79.6</td><td>80.3 53.1 82.1</td><td>25.6</td><td>42.3</td></tr><tr><td>70000 17.4</td><td>71.0</td><td>79.3</td><td>78.8</td><td>79.7 55.6 85.7</td><td>26.9</td><td>36.6</td></tr><tr><td>80000 17.3</td><td>71.0</td><td>80.3</td><td>79.0</td><td>81.3 53.8 84.8</td><td>31.0</td><td>56.3</td></tr><tr><td>90000 21.2</td><td>71.1</td><td>80.1</td><td>79.6</td><td>81.2 50.2 84.3</td><td>25.8</td><td>56.3</td></tr><tr><td>100000 21.9</td><td>71.3</td><td>79.0</td><td>79.0</td><td>80.4 52.4 83.7</td><td>34.5</td><td>46.5</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Jaccard Index Num. Overlaps Num. Overlaps/|V | |V | in use", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>5000</td><td>94.6</td><td>4710</td><td>94.2%</td><td>99.5%</td></tr><tr><td>10000</td><td>87.8</td><td>8730</td><td>87.3%</td><td>99.5%</td></tr><tr><td>20000</td><td>73.8</td><td>14600</td><td>73.0%</td><td>99.0%</td></tr><tr><td>30000</td><td>62.8</td><td>18480</td><td>61.6%</td><td>98.2%</td></tr><tr><td>40000</td><td>54.6</td><td>21200</td><td>53.0%</td><td>97.1%</td></tr><tr><td>50000</td><td>48.2</td><td>23100</td><td>46.2%</td><td>95.8%</td></tr><tr><td>60000</td><td>43.0</td><td>24360</td><td>40.6%</td><td>94.4%</td></tr><tr><td>70000</td><td>38.9</td><td>25200</td><td>36.0%</td><td>92.7%</td></tr><tr><td>80000</td><td>35.6</td><td>25840</td><td>32.3%</td><td>90.8%</td></tr><tr><td>90000</td><td>32.8</td><td>26280</td><td>29.2%</td><td>89.1%</td></tr><tr><td colspan=\"2\">100000 30.4</td><td>26600</td><td>26.6%</td><td>87.3%</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Jaccard Index and overlap proportion for varying vocabulary sizes.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Domain</td><td>Task</td><td>S</td><td>L</td><td>L-S</td></tr><tr><td/><td>CoLA</td><td>14.3</td><td>14.7</td><td>+0.40</td></tr><tr><td/><td>MNLI</td><td>69.5</td><td>71.1 \u2020</td><td>+1.60</td></tr><tr><td/><td>MRPC</td><td>79.4</td><td>79.6</td><td>+0.20</td></tr><tr><td/><td>QNLI</td><td>73.6</td><td>79.3</td><td>+5.70</td></tr><tr><td>General Domain</td><td>QQP</td><td>79.7</td><td>80.6</td><td>+0.90</td></tr><tr><td/><td>RTE</td><td>53.8</td><td>53.5</td><td>-0.50</td></tr><tr><td/><td>SST-2</td><td>81.5</td><td>84.0 \u2020</td><td>+2.50</td></tr><tr><td/><td>STS-B</td><td>36.7</td><td>28.8</td><td>-7.90</td></tr><tr><td/><td>WNLI</td><td>46.8</td><td>47.5</td><td>+0.70</td></tr><tr><td/><td>biosses</td><td>13.6</td><td>19.0</td><td>+5.40</td></tr><tr><td/><td>chemprot</td><td>59.4</td><td>65.2</td><td>+5.80</td></tr><tr><td>Biomedical</td><td>DDI</td><td>66.9</td><td>71.2 \u2020</td><td>+4.30</td></tr><tr><td/><td>HoC</td><td>81.4</td><td>82.1</td><td>+0.70</td></tr><tr><td/><td>MedNLI</td><td>67.6</td><td>70.2 \u2020</td><td>+2.60</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>: Performance of vocabulary sizes larger (L)</td></tr><tr><td>than 50,000, and vocabulary sizes smaller (S) than</td></tr><tr><td>50,000 on language understanding general (GLUE) and</td></tr><tr><td>biomedical (BLUE) tasks. An independent t-test is</td></tr><tr><td>used to calculate statistical significance (P < 0.05) de-</td></tr><tr><td>noted by \u2020. Metrics are given in Appendix 8.2.</td></tr><tr><td>)6FRUH</td></tr><tr><td>WDVN</td></tr><tr><td>FKHPSURW</td></tr><tr><td>'',</td></tr><tr><td>KRF</td></tr><tr><td>PHGQOL</td></tr><tr><td>|V|</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"text": "Expanded results fromTable 3.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"text": "Detailed results from Table 2, including statistics for the number of unique tokens used the BLUE and GLUE tasks.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"text": "Unique vocabulary elements (whole words delimited by spaces)", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |