|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:41:26.691503Z" |
|
}, |
|
"title": "HULK: An Energy Efficiency Benchmark Platform for Responsible Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Xiyou", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California Santa Barbara", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhiyu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California Santa Barbara", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xiaoyong", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California Santa Barbara", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California Santa Barbara", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Computation-intensive pretrained models have been taking the lead of many natural language processing benchmarks such as GLUE (Wang et al., 2018). However, energy efficiency in the process of model training and inference becomes a critical bottleneck. We introduce HULK, a multi-task energy efficiency benchmarking platform for responsible natural language processing. With HULK, we compare pretrained models' energy efficiency from the perspectives of time and cost. Baseline benchmarking results are provided for further analysis. The fine-tuning efficiency of different pretrained models can differ significantly among different tasks, and fewer parameter number does not necessarily imply better efficiency. We analyzed such a phenomenon and demonstrated the method for comparing the multi-task efficiency of pretrained models. Our platform is available at https:// hulkbenchmark.github.io/.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Computation-intensive pretrained models have been taking the lead of many natural language processing benchmarks such as GLUE (Wang et al., 2018). However, energy efficiency in the process of model training and inference becomes a critical bottleneck. We introduce HULK, a multi-task energy efficiency benchmarking platform for responsible natural language processing. With HULK, we compare pretrained models' energy efficiency from the perspectives of time and cost. Baseline benchmarking results are provided for further analysis. The fine-tuning efficiency of different pretrained models can differ significantly among different tasks, and fewer parameter number does not necessarily imply better efficiency. We analyzed such a phenomenon and demonstrated the method for comparing the multi-task efficiency of pretrained models. Our platform is available at https:// hulkbenchmark.github.io/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Environmental concerns of machine learning research have been rising as the carbon emission of specific tasks like neural architecture search reached an exceptional \"ocean boiling\" level (Strubell et al., 2019) . Increased carbon emission has been one of the key factors to aggravate global warming 1 . Research and development processes like parameter search further increase the environmental impact. When using cloud-based machines, the environmental impact is strongly correlated with the financial cost.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 210, |
|
"text": "(Strubell et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The recent emergence of leaderboards such as SQuAD (Rajpurkar et al., 2016) , GLUE (Wang et al., 2018) , and SuperGLUE (Wang et al., 2019) has greatly boosted the development of advanced 1 Source: https://climate.nasa.gov/causes/ models in the NLP community. Pretrained models have proven to be the key ingredient for achieving state-of-the-art in conventional metrics. However, such models can be costly to train. For example, XLNet-Large (Yang et al., 2019) was trained on 512 TPU v3 chips for 500K steps, which costs around 61,440 dollars 2 , let alone staggeringly large carbon emission.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 75, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 102, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 138, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 459, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Moreover, despite impressive performance gain, the fine-tuning and inference efficiency of NLP models remain under-explored. As recently mentioned in a tweet 3 , the popular AI text adventure game AI Dungeon has reached 100 million inferences. The energy efficiency of inference cost could be critical to both business planning and environmental impact.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous work (Schwartz et al., 2019; Dodge et al., 2019) on this topic proposed new metrics like FPO (floating-point operations) and other practices to report experimental results based on computing budget. Other benchmarks like (Coleman et al., 2017) and (Mattson et al., 2019) compare the efficiency of models on the classic reading comprehension task SQuAD and machine translation tasks. However, there has not been any concrete or practical reference for accurate estimation on NLP model pretraining, fine-tunning, and inference considering multi-task energy efficiency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 37, |
|
"text": "(Schwartz et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 38, |
|
"end": 57, |
|
"text": "Dodge et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 230, |
|
"end": 252, |
|
"text": "(Coleman et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 279, |
|
"text": "(Mattson et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Energy efficiency can be reflected in many metrics, including carbon emission, electricity usage, time consumption, number of parameters, and FPO, as shown in (Schwartz et al., 2019) . Carbon emission and electricity are intuitive measures yet either hard to track or hardware-dependent. The number of model parameters does not reflect the actual cost for model training and inference. FPO", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 182, |
|
"text": "(Schwartz et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hardware Time Cost Params BERT BASE (Devlin et al., 2018) 4 TPU v2 Pods 4 days $1,728 108M BERT LARGE (Devlin et al., 2018) 16 TPU v2 Pods 4 days $6,912 334M XLNet BASE (Yang et al., 2019) ---117M XLNet LARGE (Yang et al., 2019) 512 TPU v3 2.5 days $61,440 361M RoBERTa BASE (Liu et al., 2019) 1024 V100 GPUs 1 day $75,203 125M RoBERTa LARGE (Liu et al., 2019) 1024 V100 GPUs 1 day $75,203 356M ALBERT BASE (Lan et al., 2019) 64 TPU v3 --12M ALBERT XXLARGE (Lan et al., 2019) 1024 TPU v3 32 hours $65,536 223M DistilBERT* 8\u00d716G V100 GPU 90 hours $2,203 66M ELECTRA SMALL (Clark et al., 2020) 1 V100 GPU 96 hours $294 14M ELECTRA BASE (Clark et al., 2020) 16 TPU v3 96 hours $3,072 110M Table 1 : Pretraining costs of baseline models. Hardware and pretraining time were collected from original papers, with which costs were estimated with the current TPU price at $8 per hour with 4 core TPU v3 chips and V100 GPU at $3.06 per hour. DistilBERT model was trained upon a pretrained BERT model. Parameter numbers are estimated using the pretrained models implemented in the Transformers (https://github.com/huggingface/ transformers) library , shown in millions. The RoBERTa model was trained for 100K steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 57, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 123, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 188, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 228, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 293, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 360, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 425, |
|
"text": "(Lan et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 475, |
|
"text": "(Lan et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 591, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 634, |
|
"end": 654, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 686, |
|
"end": 693, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
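
{

"text": "The cost estimates above reduce to a simple product of device count, wall-clock time, and hourly price. The following Python snippet is only a sketch of that arithmetic under the caption's pricing assumptions ($8 per hour per 4-core TPU v3 chip, $3.06 per hour per V100 GPU) and further assumes that the TPU counts in Table 1 refer to cores; it is an illustration, not part of the HULK platform.\n\n# Minimal sketch of the cloud-cost arithmetic assumed in Table 1.\nTPU_V3_CHIP_PER_HOUR = 8.00   # price of one 4-core TPU v3 chip\nV100_GPU_PER_HOUR = 3.06      # price of one V100 GPU\n\ndef tpu_cost(cores, hours):\n    # 4 cores per chip, billed per chip-hour\n    return (cores / 4) * hours * TPU_V3_CHIP_PER_HOUR\n\ndef gpu_cost(gpus, hours):\n    return gpus * hours * V100_GPU_PER_HOUR\n\nprint(tpu_cost(cores=512, hours=2.5 * 24))    # XLNet LARGE row: 61440.0\nprint(round(gpu_cost(gpus=1024, hours=24)))   # RoBERTa rows: 75203",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model",

"sec_num": null

},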
|
{ |
|
"text": "is steady for models but cannot be directly used for cost estimation. Here, to provide a practical reference for model selection on real applications, especially model development outside academia, we keep track of the time consumption and actual financial cost for comparison. Cloud-based machines are employed for budget estimation as they are easily accessible and consistent in hardware configuration, price, and performance. In the following sections, we would use \"time\" and \"cost\" to denote the time elapsed and the actual budget in model pretraining, training, and inference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In most NLP pretrained model settings, there are three phases: pretraining, fine-tuning, and inference. If a model is trained from scratch, we consider such a model has no pretraining phase but is fine-tuned from scratch. Typically pretraining takes several days and hundreds of dollars, according to Table 1 . Fine-tuning takes a few minutes to hours, costing much less than the pretraining phase. Inference takes several milliseconds to seconds, similarly costing much less than the fine-tuning phase. Meanwhile, pretraining is done before fine-tuning once for all, while fine-tuning could be performed multiple times as training data updates. Inference is expected to be called numerous times for downstream applications. Such characteristics make it a natural choice to separate different phases during benchmarking.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 308, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our HULK benchmark, as shown in Figure 1 , utilizes several classic datasets that have been widely adopted in the community as benchmarking tasks to benchmark energy efficiency. The benchmark compares pretrained models in a multi-task fashion. The tasks include natural language inference task MNLI (Williams et al., 2017) , sentiment analysis task SST-2 (Socher et al., 2013) and Named Entity Recognition Task CoNLL-2003 (Sang and De Meulder, 2003) . Such tasks are selected to provide a thorough comparison of end-to-end energy efficiency in pretraining, fine-tuning, and inference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 322, |
|
"text": "(Williams et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 376, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 431, |
|
"text": "CoNLL-2003 (Sang and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 449, |
|
"text": "De Meulder, 2003)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 40, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "With the HULK benchmark, we quantify the energy efficiency of model pretraining, fine-tuning, and inference phase by comparing the time and cost they require to reach a certain overall task-specific performance level on selected datasets. The design principle and benchmarking process are detailed in section 2. We also explore the relation between model parameters and fine-tuning efficiency and demonstrate energy efficiency consistency between different pretrained models' tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the pretraining phase, the benchmark is designed to favor energy-efficient models in terms of time and cost that each model takes to reach specific multi-task performance pretrained from scratch. For example, we keep track of the time and cost of a BERT model in the following way: After every thousand pretraining steps, we clone the model for fine-tuning and see if the final performance can reach our cut-off performance on different tasks. When the level is reached, time and cost for pretraining are used for comparison. Mod- We consider the time and cost each model takes to reach specific multi-task performance fine-tuned from given pretrained models for the fine-tuning phase. For each task with different difficulty and instance numbers, the fine-tuning characteristics may differ a lot. When pretrained models are used to deal with a non-standard downstream task, especially ad hoc application in industry, the task's fine-tuning time and cost cannot be estimated directly from any other standard task. Therefore, it is essential to compare the multi-task efficiency for model choice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Benchmark Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For the inference phase, each model's time and cost for making inference on a single instance on multiple tasks are compared similarly to the fine-tuning phase. Specially, we estimate the time elapsed for each inference by averaging thousands of inference samples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Benchmark Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The datasets we used are widely adopted in the NLP community. Quantitative details of datasets can be found in Table 2 . The selected tasks are shown below:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 118, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Overview", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "CoNLL 2003 The Conference on Computational Natural Language Learning (CoNLL-2003) shared task concerns languageindependent named entity recognition (Sang and De Meulder, 2003) . The task concentrates on four types of named entities: persons, locations, organizations, and other miscellaneous entities. Here we only use the English dataset. The English data is a collection of news wire articles from the Reuters Corpus. The result is reflected as F1 score considering the label accuracy and recall on the dev set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 175, |
|
"text": "(Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Overview", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "MNLI The Multi-Genre Natural Language Inference Corpus (Williams et al., 2017 ) is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are gathered from ten different sources, including transcribed speech, fiction, and government reports. The accuracy score is reported as the average of performance on matched and mismatched dev sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 77, |
|
"text": "(Williams et al., 2017", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Overview", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The Stanford Sentiment Treebank (Socher et al., 2013) consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. Following the setting of GLUE, we also use the two-way (positive/negative) class split and use only sentencelevel labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 53, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SST-2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The tasks are selected based on how representative the dataset is. CoNLL 2003 has been a widely used dataset for named entity recognition and requires the output of token-level labeling. NER is a core NLP task, and CoNLL 2003 has been a classic dataset in this area. SST-2 and MNLI are part of the GLUE benchmark, representing sentence-level labeling tasks. SST-2 has been frequently used in sentiment analysis across different generations of models. MNLI is a newly introduced large dataset for natural language inference. The training time for MNLI is relatively long, and the task requires a lot more training instances. We select the three tasks for a diverse yet practical benchmark for pretrained models without constraining the models on sentence-level classification tasks. Besides, their efficiency differs significantly in the fine-tuning and inference phase. Such a difference can still be reflected in the final score after normalization, as shown in Table 3 . Provided with more computing resources, we can bring in more datasets for even more thorough benchmarking in the future. We illustrate the evaluation criteria in the following subsection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 963, |
|
"end": 970, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SST-2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In machine learning model training and inference, slight parameter change can subtly impact the final result. To make a practical reference for pretrained model selection, we compare models' end-to-end performance concerning the pretraining time and cost, fine-tuning time and cost, inference time and cost following the setting of (Coleman et al., 2017) . Table 3 : Multi-task Baseline Fine-tuning Costs. Time is given in seconds and score is computed by the division of Time BERTLARGE /Time model .The experiments are conducted on a single RTX 2080 Ti GPU following the evaluation criterion. The overall score is computed by summing up the scores of each task. We also use the cost to compute a new score for each task for cost-based leaderboards and summarize similarly. \"N/A\" means fail to reach the given performance after five epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 354, |
|
"text": "(Coleman et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 364, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Criteria", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "For the pretraining phase, we design the protocol to explore how much computing resource is required to reach specific final multi-task performance via fine-tuning after the pretraining. Therefore, during model pretraining, after every thousand pretraining steps, we use the current pretrained model for fine-tuning and see if the finetuned model can reach our cut-off performance. When it does, we count the time and cost in the pretraining process for benchmarking and analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Criteria", |
|
"sec_num": "2.2" |
|
}, |
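
{

"text": "A minimal sketch of this pretraining protocol is given below. The callables pretrain_1k_steps and fine_tune_and_eval are hypothetical placeholders for the user's own training code, not functions provided by the HULK platform.\n\nimport time\n\ndef benchmark_pretraining(pretrain_1k_steps, fine_tune_and_eval, cutoffs):\n    # pretrain_1k_steps(): runs the next thousand pretraining steps in place.\n    # fine_tune_and_eval(task): fine-tunes a copy of the current checkpoint\n    # on the task and returns its dev-set score.\n    # cutoffs: {task: cut-off dev-set performance}.\n    start, steps = time.time(), 0\n    while True:\n        pretrain_1k_steps()\n        steps += 1000\n        scores = {task: fine_tune_and_eval(task) for task in cutoffs}\n        if all(scores[task] >= cutoffs[task] for task in cutoffs):\n            # elapsed seconds; multiply by the hourly machine price for cost\n            return steps, time.time() - start",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Criteria",

"sec_num": "2.2"

},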
|
{ |
|
"text": "For the fine-tuning phase, we want to compare the general efficiency of the pretrained model reaching cut-off performance on the selected dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Criteria", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "During fine-tuning, we evaluate the current finetuned model on the development set after a certain small number of fine-tuning steps. When the performance reaches our cut-off performance, we count the time and cost in this fine-tuning process for benchmarking and analysis. To be specific, for a single pretrained model, the efficiency score on different tasks is defined as the sum of normalized time and cost. Here we normalize the time and cost because they vary dramatically between tasks. To simplify the process, we compute the ratio of BERT LARGE 's time and cost to that of each model as the normalized measure, as shown in Table 3 and Table 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 639, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 651, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Criteria", |
|
"sec_num": "2.2" |
|
}, |
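
{

"text": "Concretely, the time-based leaderboard score reduces to the ratio of BERT LARGE's time to each model's time, summed over tasks, and the cost-based score is computed analogously. The sketch below uses made-up numbers purely for illustration; they are not values from Table 3.\n\ndef efficiency_scores(measurements, reference='BERT-LARGE'):\n    # measurements: {model: {task: seconds (or dollars) to reach the cut-off}}\n    # per-task score = time(reference) / time(model); overall = sum over tasks\n    ref = measurements[reference]\n    return {model: sum(ref[task] / vals[task] for task in ref)\n            for model, vals in measurements.items()}\n\nexample = {\n    'BERT-LARGE': {'CoNLL-2003': 800, 'SST-2': 600, 'MNLI': 9000},\n    'BERT-BASE': {'CoNLL-2003': 300, 'SST-2': 250, 'MNLI': 4000},\n}\nprint(efficiency_scores(example))  # reference sums to 3.0; faster models score higher",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Criteria",

"sec_num": "2.2"

},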
|
{ |
|
"text": "We follow the fine-tuning principles for the inference phase, and we use the time and cost of inference for benchmarking. The models we used for inference experiments are fine-tuned in the last part. Each of the benchmarking results was calculated using the average of time and cost over one thousand samples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Criteria", |
|
"sec_num": "2.2" |
|
}, |
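
{

"text": "Averaging per-instance inference time over many samples can be sketched as follows; the predict callable and the warm-up step are illustrative assumptions rather than the platform's exact harness.\n\nimport time\n\ndef average_inference_ms(predict, instances, warmup=10):\n    # predict(x): user-supplied callable running one forward pass.\n    # A few warm-up calls are excluded so one-time setup does not skew the mean.\n    for x in instances[:warmup]:\n        predict(x)\n    start = time.perf_counter()\n    for x in instances[warmup:]:\n        predict(x)\n    elapsed = time.perf_counter() - start\n    return 1000.0 * elapsed / max(1, len(instances) - warmup)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Criteria",

"sec_num": "2.2"

},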
|
{ |
|
"text": "The selection of performance cut-off could be critical because we consider certain models to be Table 4 : Multi-task Baseline Inference Costs. Time is given in milliseconds and score is computed by the division of Time BERTLARGE /Time model .The experiments are conducted on a single RTX 2080 Ti GPU following the evaluation criterion similar to the fine-tuning part. The inference time between tasks is more consistent compared to the fine-tuning phase.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 103, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance Cut-off Selection", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\"qualified\" after reaching specific performance on the development set. Meanwhile, particular tasks can reach a \"sweet point\" where after a relatively smaller amount of training time, the model reaches performance close to the final converged performance with a negligible difference. The cut-off must be high enough to make sure any model that surpasses the threshold can be competent for the task. On the other hand, if the cut-off is too high, we will not have enough data points to evaluate the model's multi-task performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance Cut-off Selection", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Here, our cut-offs were selected by observing the recent state-of-the-art model's performance on the selected dataset for the task 4 . A wise choice would be choosing the performance of some classic methods like LSTM-CRF or Bi-LSTM models as the cut-off. In this way, we can easily compare the efficiency of most models with a solid performance bar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance Cut-off Selection", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Submissions can be made to our benchmark through sending code and result to our HULK benchmark CodaLab competition 5 following the guidelines in both our FAQ part of website and competition introduction. We require the submissions to include detailed end-to-end model training information, including model run time, cost (cloud-based machine only), parameter number, and part of the development set output for result validation. A training / fine-tuning log, including time consumption and dev set performance af-ter certain steps, is also required. For inference, development set output, time consumption, and hardware/software details should be provided. For model reproducibility, source code is also required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Submission to Benchmark", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We adopt the reported resource requirements in the original papers as the pretraining phase baselines for computation-intensive tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Settings and Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For fine-tuning and inference phase, we conduct extensive experiments on given hardware (RTX 2080Ti GPU) with different model settings as shown in Table 3 and Table 4 . We also collect the development set performance with time in finetuning to investigate how the model is fine-tuned for different tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 166, |
|
"text": "Table 3 and Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline Settings and Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In our fine-tuning setting, we are given a specific hardware and software configuration. We adjust the hyper-parameter using grid search to minimize the time fine-tuning towards cut-off performance. For example, we choose the proper batch size and learning rate for BERT BASE to make sure the model converges and can reach expected performance as soon as possible with parameter searching.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Settings and Analysis", |
|
"sec_num": "3" |
|
}, |
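
{

"text": "The hyper-parameter selection above is a plain grid search that keeps the configuration reaching the cut-off fastest. The sketch below assumes a user-supplied fine_tune_until_cutoff callable and an illustrative grid; neither is prescribed by the paper.\n\nimport itertools, math\n\ndef fastest_config(fine_tune_until_cutoff, batch_sizes, learning_rates):\n    # fine_tune_until_cutoff(bs, lr): fine-tunes with the given hyper-parameters and\n    # returns seconds to reach the cut-off, or math.inf if it never does\n    # (e.g., within five epochs).\n    best, best_time = None, math.inf\n    for bs, lr in itertools.product(batch_sizes, learning_rates):\n        t = fine_tune_until_cutoff(bs, lr)\n        if t < best_time:\n            best, best_time = (bs, lr), t\n    return best, best_time\n\n# e.g., fastest_config(run, batch_sizes=[16, 32], learning_rates=[2e-5, 3e-5, 5e-5])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baseline Settings and Analysis",

"sec_num": "3"

},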
|
{ |
|
"text": "As shown in Figure 2 , the fine-tuning performance curve differs a lot among pretrained models. The x-axis denoting time consumed is shown in log-scale for a better comparison of different models. None of the models take the lead in all tasks. However, if two pretrained models are in the same family, such as BERT BASE and BERT LARGE , the model with a smaller number of parameters tend to converge a bit faster than the other in the NER and SST-2 task. In the MNLI task, such a trend does Figure 2 : The comparison between different pretrained models for CoNLL 2003, SST-2, and MNLI datasets trained on a single RTX 2080Ti GPU. The curves are smoothed by computing the average with two adjacent data points. The experiments are conducted by selecting hyper-parameters to minimize the time consumption, yet making sure the model can converge after a certain amount of time. Results are demonstrated using performance on the development set after being finetuned for specific steps on the training dataset. not apply because of the increased difficulty level and the number of training instances, which favors a larger model capacity. Even though ALBERT model has a lot fewer parameters than BERT, according to Table 1 , the ALBERT model's fine-tuning time is significantly more than BERT models because ALBERT uses large hidden size and more expensive matrix computation. The parameter sharing technique makes it harder to fine-tune the model. RoBERTa LARGE model is relatively stable in all tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 499, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1211, |
|
"end": 1218, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline Settings and Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "GLUE benchmark (Wang et al., 2018 ) is a popular multi-task benchmarking and diagnosis platform providing score evaluating multi-task NLP models considering multiple single-task performances. Su-perGLUE (Wang et al., 2019) further develops the task and enriches the evaluation dataset, making tasks more challenging. These multi-task benchmarks do not consider computation efficiency but innovates the development of pretrained models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 33, |
|
"text": "(Wang et al., 2018", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 222, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "MLPerf (Mattson et al., 2019) compares training and inference efficiency from a hardware perspective, providing helpful resources on hardware selection and model training. The benchmark focused on several typical applications, including image classification and machine translation. However, it does not separate different training phases, thus making it hard to find the reference for fine-tuning only applications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 29, |
|
"text": "(Mattson et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Previous work (Schwartz et al., 2019; Dodge et al., 2019) on related topic working towards \"Green AI\" proposes new metrics like FPO and new principle in efficiency evaluation. We further make more detailed and practical contributions to model energy efficiency benchmarking.", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 37, |
|
"text": "(Schwartz et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 38, |
|
"end": 57, |
|
"text": "Dodge et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Other work like DAWNBenchmark (Coleman et al., 2017) looks into the area of end-to-end model efficiency comparison for both computer vision and NLP task SQuAD. The benchmark is very detailed and intuitive. However, it does not compare multitask efficiency performance and covered only one NLP task. Similar to MLPerf, it does not separate fine-tuning efficiency from training efficiency. The Efficient NMT shared task of The 2nd Workshop on Neural Machine Translation and Generation proposed an efficiency track to compare the inference time of neural machine translation models. Our platform covers more phases and supports multi-task comparison.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 52, |
|
"text": "(Coleman et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We developed the HULK platform focusing on the energy efficiency benchmarking of NLP models based on their end-to-end performance on selected NLP tasks. The HULK platform compares models in the pretraining, fine-tuning, and inference phase, making it clear to follow and propose more trainingefficient and inference-efficient models. We have compared the fine-tuning efficiency of given models during baseline testing and demonstrated more parameters lead to slower fine-tuning when using the same model design but do not hold when the model architecture changes.We expect more submissions in the future to flourish and enrich our benchmark.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Source: The Staggering Cost of Training SOTA AI Models by Synced Global 3 Source: Nick Walton's Tweet on Passing 100 Million Inferences on AI Dungeon.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For example, we referred to the performance data points on Papers With Code for candidates.5 The CodaLab competition is available on the website.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://iee.ucsb.edu/news/making-ai-more-energyefficient", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by the Institute of Energy Efficiency (IEE) at UCSB's seed grant in Summer 2019 to improve AI and machine learning's energy efficiency. 6 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Electra: Pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. Electra: Pre-training text encoders as discriminators rather than genera- tors. arXiv preprint arXiv:2003.10555.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Dawnbench: An end-to-end deep learning benchmark and competition", |
|
"authors": [ |
|
{ |
|
"first": "Cody", |
|
"middle": [], |
|
"last": "Coleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tian", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luigi", |
|
"middle": [], |
|
"last": "Nardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Bailis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kunle", |
|
"middle": [], |
|
"last": "Olukotun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "R\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matei", |
|
"middle": [], |
|
"last": "Zaharia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Training", |
|
"volume": "100", |
|
"issue": "101", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cody Coleman, Deepak Narayanan, Daniel Kang, Tian Zhao, Jian Zhang, Luigi Nardi, Peter Bailis, Kunle Olukotun, Chris R\u00e9, and Matei Zaharia. 2017. Dawnbench: An end-to-end deep learning bench- mark and competition. Training, 100(101):102.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Show your work: Improved reporting of experimental results", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Dodge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suchin", |
|
"middle": [], |
|
"last": "Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.03004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Dodge, Suchin Gururangan, Dallas Card, Roy Schwartz, and Noah A Smith. 2019. Show your work: Improved reporting of experimental results. arXiv preprint arXiv:1909.03004.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.11942" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations. arXiv preprint arXiv:1909.11942.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Victor Bittorf, et al. 2019. Mlperf training benchmark", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Mattson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cody", |
|
"middle": [], |
|
"last": "Coleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Diamos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paulius", |
|
"middle": [], |
|
"last": "Micikevicius", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Patterson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanlin", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gu-Yeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bailis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.01500" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Mattson, Christine Cheng, Cody Coleman, Greg Diamos, Paulius Micikevicius, David Patterson, Hanlin Tang, Gu-Yeon Wei, Peter Bailis, Victor Bit- torf, et al. 2019. Mlperf training benchmark. arXiv preprint arXiv:1910.01500.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Squad: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Introduction to the conll-2003 shared task: Languageindependent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Erik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Intro- duction to the conll-2003 shared task: Language- independent named entity recognition.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.01108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment tree- bank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631-1642.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Energy and policy considerations for deep learning in nlp", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananya", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mc-Callum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.02243" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Ananya Ganesh, and Andrew Mc- Callum. 2019. Energy and policy considera- tions for deep learning in nlp. arXiv preprint arXiv:1906.02243.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Superglue: A stickier benchmark for general-purpose language understanding systems", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1905.00537" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2019. Super- glue: A stickier benchmark for general-purpose language understanding systems. arXiv preprint arXiv:1905.00537.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.07461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.05426" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2017. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretrain- ing for language understanding. arXiv preprint arXiv:1906.08237.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Screenshot of the leaderboard of website." |
|
}, |
|
"TABREF1": { |
|
"text": "Dataset Information els faster or cheaper to pretrain are recommended.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |