|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:05:28.378549Z" |
|
}, |
|
"title": "NUIG-DSI's submission to The GEM Benchmark 2021", |
|
"authors": [ |
|
{ |
|
"first": "Nivranshu", |
|
"middle": [], |
|
"last": "Pasricha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "SFI Centre for Research Training in Artificial Intelligence", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Ireland", |
|
"location": { |
|
"settlement": "Galway" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "SFI Centre for Research Training in Artificial Intelligence", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the submission by NUIG-DSI to the GEM benchmark 2021. We participate in the modeling shared task where we submit outputs on four datasets for datato-text generation, namely, DART, WebNLG (en), E2E and CommonGen. We follow an approach similar to the one described in the GEM benchmark paper where we use the pretrained T5-base model for our submission. We train this model on additional monolingual data where we experiment with different masking strategies specifically focused on masking entities, predicates and concepts as well as a random masking strategy for pre-training. In our results we find that random masking performs the best in terms of automatic evaluation metrics, though the results are not statistically significantly different compared to other masking strategies. Tripleset Antioquia Department country Colombia Bandeja paisa ingredient Chorizo Bandeja paisa region Antioquia Department linearisation Antioquia Department country Colombia Bandeja paisa ingredient Chorizo Bandeja paisa region Antioquia Department tags <SUB> Antioquia Department <PRED> country <OBJ> Colombia <SUB> Bandeja paisa <PRED> ingredient <OBJ> Chorizo <SUB> Bandeja paisa <PRED> region <OBJ> Antioquia Department entity types <LOCATION> Antioquia Department <PRED> country <LOCATION> Colombia <FOOD> Bandeja paisa <PRED> ingredient <SAUSAGE> Chorizo <FOOD> Bandeja paisa <PRED> region <LOCATION> Antioquia Department NER tags <ORG> Antioquia Department <PRED> country <GPE> Colombia <PERSON> Bandeja paisa <PRED> ingredient <UNKNOWN> Chorizo <PERSON> Bandeja paisa <PRED> region <ORG> Antioquia Department (a) Additional tags added to the linearised tripleset. Lexicalisation Chorizo is an ingredient in Bandeja paisa, a dish from the Antioquia Department region, in Colombia. Random Masking Chorizo is an ingredient in Bandeja paisa, a dish [MASK] Antioquia Department [MASK], in Colombia. Entity Masking [MASK] is an ingredient in [MASK], a dish from the [MASK] region, in [MASK]. Predicate Masking Chorizo is an [MASK] in Bandeja paisa, a dish from the Antioquia Department [MASK], in Colombia. (b) Masking strategies for pre-training on monolingual data.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the submission by NUIG-DSI to the GEM benchmark 2021. We participate in the modeling shared task where we submit outputs on four datasets for datato-text generation, namely, DART, WebNLG (en), E2E and CommonGen. We follow an approach similar to the one described in the GEM benchmark paper where we use the pretrained T5-base model for our submission. We train this model on additional monolingual data where we experiment with different masking strategies specifically focused on masking entities, predicates and concepts as well as a random masking strategy for pre-training. In our results we find that random masking performs the best in terms of automatic evaluation metrics, though the results are not statistically significantly different compared to other masking strategies. Tripleset Antioquia Department country Colombia Bandeja paisa ingredient Chorizo Bandeja paisa region Antioquia Department linearisation Antioquia Department country Colombia Bandeja paisa ingredient Chorizo Bandeja paisa region Antioquia Department tags <SUB> Antioquia Department <PRED> country <OBJ> Colombia <SUB> Bandeja paisa <PRED> ingredient <OBJ> Chorizo <SUB> Bandeja paisa <PRED> region <OBJ> Antioquia Department entity types <LOCATION> Antioquia Department <PRED> country <LOCATION> Colombia <FOOD> Bandeja paisa <PRED> ingredient <SAUSAGE> Chorizo <FOOD> Bandeja paisa <PRED> region <LOCATION> Antioquia Department NER tags <ORG> Antioquia Department <PRED> country <GPE> Colombia <PERSON> Bandeja paisa <PRED> ingredient <UNKNOWN> Chorizo <PERSON> Bandeja paisa <PRED> region <ORG> Antioquia Department (a) Additional tags added to the linearised tripleset. Lexicalisation Chorizo is an ingredient in Bandeja paisa, a dish from the Antioquia Department region, in Colombia. Random Masking Chorizo is an ingredient in Bandeja paisa, a dish [MASK] Antioquia Department [MASK], in Colombia. Entity Masking [MASK] is an ingredient in [MASK], a dish from the [MASK] region, in [MASK]. Predicate Masking Chorizo is an [MASK] in Bandeja paisa, a dish from the Antioquia Department [MASK], in Colombia. (b) Masking strategies for pre-training on monolingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The GEM Benchmark (Gehrmann et al., 2021 ) is a living benchmark focusing on generation, evaluation and metrics for a variety of natural language generation tasks including summarization, simplification, dialog and data-to-text generation. In general, the field of natural language generation (NLG) is concerned with automatic generation of human understandable texts, typically from a nonlinguistic or textual representation of information as input (Reiter and Dale, 2000) . Traditionally, most applications for NLG have relied on rulebased systems designed using a modular pipeline approach (Gatt and Krahmer, 2018) . However, recently approaches based on neutral networks with an encoder-decoder architecture trained in an endto-end fashion have gained popularity. These typically follow the paradigm of pre-training on a large corpus followed by fine-tuning on a task specific dataset and have been shown to achieve state-of-theart results on many natural language tasks (Raffel et al., 2020; Lewis et al., 2020) . When evaluated by human annotators, neural models for data-to-text generation have been found to produce fluent text though such models might struggle in terms of data coverage, relevance and correctness where rulebased systems score high (Castro Ferreira et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 40, |
|
"text": "(Gehrmann et al., 2021", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 473, |
|
"text": "(Reiter and Dale, 2000)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 617, |
|
"text": "(Gatt and Krahmer, 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 975, |
|
"end": 996, |
|
"text": "(Raffel et al., 2020;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 997, |
|
"end": 1016, |
|
"text": "Lewis et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1258, |
|
"end": 1288, |
|
"text": "(Castro Ferreira et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our participation in the GEM benchmark, we submit outputs for four datasets including DART (Nan et al., 2021) , WebNLG (Gardent et al., 2017; Castro Ferreira et al., 2020) , E2E (Novikova et al., 2017; Du\u0161ek et al., 2019) and CommonGen (Lin et al., 2020) . We use the pre-trained T5-base model architecture (Raffel et al., 2020) for our submission implemented using the transformers library from Hugging Face (Wolf et al., 2020) . We first train on monolingual data before fine-tuning on the task-specific dataset. For DART and WebNLG, we use abstracts from DBpedia (Auer et al., 2007) for training while for the other two datasets, we use monolingual target-side references for pre-training with a masked language modeling objective. We experiment with different masking strategies where we mask entities and predicates (for DART), meaning representation fields (for E2E) and concepts (for CommonGen) and compare the results with commonly used approach of random masking. Our results suggest that random masking achieves the best scores for automatic evaluation metrics for DART, WebNLG and E2E while additional pretraining appears to hurt the performance for Com-monGen.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 112, |
|
"text": "(Nan et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 122, |
|
"end": 144, |
|
"text": "(Gardent et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 174, |
|
"text": "Castro Ferreira et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 204, |
|
"text": "(Novikova et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 224, |
|
"text": "Du\u0161ek et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 257, |
|
"text": "(Lin et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 331, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 431, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 588, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section we define our methodology on the four datasets where we make a submission and subsequently discuss the results based on automatic evaluation metrics defined in the GEM benchmark. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "DART (Nan et al., 2021) consists of open domain data records structured in the form of triples paired with crowd-sourced textual annotations in English describing those triples. The data is collected from multiple different sources including tables from Wikipedia, questions from WikiSQL and merged with two existing data-to-text datasets, namely, WebNLG (en) (Gardent et al., 2017) and cleaned E2E (Du\u0161ek et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 23, |
|
"text": "(Nan et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 382, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 419, |
|
"text": "(Du\u0161ek et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Since both DART and WebNLG are concerned with the task of triple-to-text generation and have the same input data structure, we follow the same approach as defined in Pasricha et al. (2020) for the WebNLG+ challenge. We use the pre-trained T5 model architecture and first train it on a corpus of abstracts from DBpedia with a masked language modeling objective. For masking, we adopt the commonly used approach of randomly masking 15% of the tokens in texts. We further compare this with an approach where we specifically mask only the entities or only the predicates or a combination of both as shown in Figure 1(b) . The abstracts are downloaded from DBpedia for the entities which are present in the triples contained in the training set of the DART dataset. Since we did not find an abstract for each unique entity in the training set, we ended up with 9,218 abstracts consisting on 1,654,239 tokens and 83,583 types in total with an average of 179.45 tokens per abstract. After pretraining, we fine-tune on the DART training set to predict the target text conditioned on the linearised tripleset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 188, |
|
"text": "Pasricha et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 604, |
|
"end": 615, |
|
"text": "Figure 1(b)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
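To make the two pre-training corruption schemes concrete, here is a minimal sketch of how a DBpedia abstract sentence could be masked either randomly or at entity spans. It is an illustration rather than the submission's actual preprocessing code: the paper does not specify tokenisation details, and T5 in practice replaces corrupted spans with sentinel tokens (<extra_id_0>, <extra_id_1>, ...) rather than the literal [MASK] symbol shown in Figure 1(b).

```python
import random

def random_mask(tokens, rate=0.15, mask_token="[MASK]"):
    # Random strategy: replace roughly 15% of the tokens with the mask token.
    return [mask_token if random.random() < rate else t for t in tokens]

def entity_mask(text, entities, mask_token="[MASK]"):
    # Entity strategy: replace every occurrence of a known entity span.
    # Longest entities are replaced first to avoid partial overlaps.
    for e in sorted(entities, key=len, reverse=True):
        text = text.replace(e, mask_token)
    return text

sentence = ("Chorizo is an ingredient in Bandeja paisa, a dish from the "
            "Antioquia Department region, in Colombia.")
print(" ".join(random_mask(sentence.split())))
print(entity_mask(sentence, {"Chorizo", "Bandeja paisa",
                             "Antioquia Department", "Colombia"}))
```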
|
{ |
|
"text": "For fine-tuning we linearise the input tripleset into a sequence without modifying the order of the triples in the input. We incorporate additional information to mark the subject, predicate and object in each triple in the input by using <SUB>, <PRED> and <OBJ> tags respectively. Additionally, we also include tags for the type of an entity using DBpedia as shown in Figure 1 (a). In the instances where we do not find the type of an entity on DBpedia, we check whether it refers to a time period or a date and assign it the type <TIMEPERIOD>. Otherwise, we assign the type <MEASUREMENT> to an entity containing a numeric value followed by some text. The type <NUMERIC> is assigned to entities which only consist of numeric values and <UNKNOWN> to everything else. Furthermore, as a comparison, we add tags for entities using the named entity recognition pipeline from the spaCy library 1 . All of these tags are included as additional special tokens to the model vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 377, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
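A sketch of how the tagged linearisation and the vocabulary extension could be implemented with the Hugging Face tokenizer is shown below; the helper name and the exact tag handling are our assumptions and may differ from the submission's code.

```python
from transformers import T5Tokenizer

def linearise(triples):
    # Mark the subject, predicate and object of each triple with tags,
    # preserving the original order of the triples.
    return " ".join(f"<SUB> {s} <PRED> {p} <OBJ> {o}" for s, p, o in triples)

triples = [("Antioquia Department", "country", "Colombia"),
           ("Bandeja paisa", "ingredient", "Chorizo"),
           ("Bandeja paisa", "region", "Antioquia Department")]

tokenizer = T5Tokenizer.from_pretrained("t5-base")
# Register the tags as special tokens so they are kept as single units
# instead of being split into subwords; the model's embedding matrix then
# needs to be resized with model.resize_token_embeddings(len(tokenizer)).
tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<SUB>", "<PRED>", "<OBJ>"]})
print(linearise(triples))
```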
|
{ |
|
"text": "For our experiments with masking during pretraining on DBpedia abstracts, we use the small variant of the T5 model architecture. This model has approximately 60 million parameters and is much faster to train compared to other larger variants. We use the pre-trained model implementation from Hugging Face's transformers library (Wolf et al., 2020) which consists of 6 layers each in the encoder and decoder with a multi-head attention sub-layer consisting of 8 attention heads. The word embeddings have a dimension of 512 and the fully-connected feed-forward sublayers are 2048dimensional. Pre-training on DBpedia abstracts is done on a single Nvidia GeForce GTX 1080 Ti GPU for 10 epochs with a batch size of 8 using the Adam optimizer with a learning rate of 0.001. All the other hyperparameter values are set to their default values. Table 1 shows scores for the output generations on the validation set for BLEU (Papineni et al., 2002) , METEOR (Banerjee and Lavie, 2005) and ROUGE-L (Lin, 2004) . We find random masking to perform the best in terms of automatic evaluation metrics compared to specifically masking entities or predicates, though the results are not statistically significantly different.", |
|
"cite_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 347, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 916, |
|
"end": 939, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 949, |
|
"end": 975, |
|
"text": "(Banerjee and Lavie, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 988, |
|
"end": 999, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 837, |
|
"end": 844, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
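A minimal sketch of the corresponding pre-training step, using the hyperparameters stated above (Adam, learning rate 0.001, batch size 8, 10 epochs over the abstracts), is given below. The paper does not specify the exact input formatting of the denoising objective, so the sketch simply reconstructs the original abstract from its masked version.

```python
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small").to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

def training_step(masked_texts, original_texts):
    # One denoising step: predict the original abstract given its masked
    # version, i.e. a masked language modeling objective.
    inputs = tokenizer(masked_texts, return_tensors="pt", padding=True,
                       truncation=True).to(device)
    labels = tokenizer(original_texts, return_tensors="pt", padding=True,
                       truncation=True).input_ids.to(device)
    labels[labels == tokenizer.pad_token_id] = -100  # ignore padding in loss
    loss = model(**inputs, labels=labels).loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
```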
|
{ |
|
"text": "Furthermore, in our experiments we compare the results when additional tags are added to the input either as entity types from DBpedia or NER tags from spaCy or just the <SUB>, <PRED> and <OBJ> tags. For this, we use the T5-base model with approximately 220 million parameters. This model consists of 12 layers each in the encoder and decoder with 12 attention heads in each multihead attention sublayer. The word embeddings are 768-dimensional for this model and feed-forward sublayer is 3072-dimensional. This model is first pre-trained on DBpedia abstracts with a masked language modeling objective where 15% of the tokens are corrupted randomly. For fine-tuning, Table 2 : Results from automatic evaluation on the DART validation set with different tags for fine-tuning. The results are shown here using the T5-base model which is first pre-trained with the random masking on a corpus of DBpedia abstracts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 667, |
|
"end": 674, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "we train on the DART training set for 10 epochs on a single Nvidia GeForce GTX 1080 Ti GPU with a batch size of 16 and select the checkpoint with the highest BLEU score on the validation set. We set the maximum output sequence length to 50 words and apply beam search during inference with a beam of size equal to 5. Here we find that adding the three <SUB>, <PRED> and <OBJ> tags achieves the best results compared to tags from DBpedia or spaCy though the differences in the automatic evaluation results are again not statistically significant. For our final submission to the GEM benchmark, we submit the outputs from this model which is fine-tuned with the added <SUB>, <PRED> and <OBJ> tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DART", |
|
"sec_num": "2.1" |
|
}, |
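For inference, the decoding setup described above corresponds to a generate call along the following lines. This assumes the fine-tuned checkpoint and the tag-extended tokenizer from the previous sketches; note also that max_length in transformers counts subword tokens rather than words, so the 50-word cap reported in the paper may have been enforced slightly differently.

```python
source = ("<SUB> Bandeja paisa <PRED> ingredient <OBJ> Chorizo "
          "<SUB> Bandeja paisa <PRED> region <OBJ> Antioquia Department")
input_ids = tokenizer(source, return_tensors="pt").input_ids.to(device)
outputs = model.generate(input_ids,
                         max_length=50,   # maximum output sequence length
                         num_beams=5,     # beam search with beam size 5
                         early_stopping=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```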
|
{ |
|
"text": "WebNLG (Gardent et al., 2017) introduced the task of RDF-to-Text generation focused on generating a verbalisation in a human language in the output based on a set of RDF-triples in the input. The WebNLG corpus consists of data units made up of RDF-triples extracted from DBpedia (Auer et al., 2007) and paired with reference text lexicalisations. These texts were collected using crowd-sourcing and contain sequences of one or more short sentences in English, verbalising the data units in the input. The first version of the corpus contained triplesets from 15 DBpedia categories and is divided into two subsets, seen and unseen for evaluation. The ten seen categories are Airport, Astronaut Table 3 : Results from automatic evaluation on the E2E validation set with different masking strategies on monolingual data for pre-training using the T5-base model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 29, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 298, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 693, |
|
"end": 700, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WebNLG", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Since the entire WebNLG (en) corpus is already included the DART dataset without any modifications, we use the same model as defined in \u00a72.1 without any further fine-tuning to generate outputs on the WebNLG (en) dataset. Our overall approach is same as Pasricha et al. (2020) for the WebNLG+ challenge 2020 except here we use additional 6,678 DBpedia abstracts for pre-training and the larger DART dataset for fine-tuning which results in a higher scores for automatic evaluation metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 275, |
|
"text": "Pasricha et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WebNLG", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "E2E (Novikova et al., 2017) is concerned with generating texts for a dialogue system from meaning representations (MR) in the restaurant domain. It was introduced with the aim of motivating research in domain-specific end-to-end data-driven natural language generation systems. The input for E2E comprises of meaning representations with up to 8 different fields including name, near, area, food, eatType, priceRange, rating and familyFriendly while the output comprises of sentences typically made of up 20 -30 words in English verbalising the input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 27, |
|
"text": "(Novikova et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "E2E", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We follow the same approach as described in \u00a72.1 and experiment with masking strategies for pre-training on monolingual data. Instead of using additional out-of-domain data, we use the target side references from the E2E dataset for pretraining with a masked language modeling objective. Here we compare the results on two masking strategies, one where we mask 15% of the token spans randomly and another where we mask specific values based on meaning representation fields such as restaurant names, area, price, etc. This approach is similar to the one described in \u00a72.1 where we masked specifically masked entities and predicates. Table 3 shows scores for the output generations on the validation set for BLEU, ME-TEOR and ROUGE-L. We again find that random Table 4 : Results from automatic evaluation on the Com-monGen validation set with different masking strategies on monolingual data for pre-training using the T5base model. masking appears to perform better though the differences in terms of automatic evaluation metrics are not significantly different. For our submission to the GEM benchmark, we use the same model architecture and hyperparameter values as described previously for DART to generate the output submissions on the E2E test set and challenge sets. This model is first pre-trained on the monolingual target side with a masked language objective where the spans of text are masked randomly and the fine-tuned on the E2E training set containing pairs of meaning representations and target texts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 633, |
|
"end": 640, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 767, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E2E", |
|
"sec_num": "2.3" |
|
}, |
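A sketch of the field-value masking strategy for E2E is shown below, using the dataset's standard MR string format (e.g. "name[The Eagle], eatType[coffee shop]"). The helper names are ours, and values are matched only where they appear verbatim in the reference; the matching in the actual submission may be more involved.

```python
def parse_mr(mr):
    # "name[The Eagle], eatType[coffee shop], area[riverside]" -> dict
    fields = {}
    for part in mr.split("], "):
        key, _, value = part.rstrip("]").partition("[")
        fields[key.strip()] = value
    return fields

def mask_mr_values(reference, mr, mask_token="[MASK]"):
    # Mask the field values (restaurant name, area, price, ...) wherever
    # they occur verbatim in the target-side reference.
    for value in parse_mr(mr).values():
        reference = reference.replace(value, mask_token)
    return reference

mr = "name[The Eagle], eatType[coffee shop], area[riverside]"
ref = "The Eagle is a coffee shop located in the riverside area."
print(mask_mr_values(ref, mr))
# -> "[MASK] is a [MASK] located in the [MASK] area."
```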
|
{ |
|
"text": "CommonGen (Lin et al., 2020) was introduced with the goal of testing state-of-the-art text generation systems for the ability of commonsense reasoning. The task for CommonGen is to generate a coherent sentence in English describing an everyday scenario using a set of concepts such as man, woman, dog, throw and catch. Lin et al. (2020) have shown that large pre-trained language models are prone to hallucinations and can generate incoherent sentences such as \"hands washing soap on the sink\" for the concept set {hand, sink, wash, soap}. Two key challenges identified by the creators of this dataset are relational reasoning with underlying commonsense knowledge for given concepts and compositional generalization for unseen combinations of concepts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 28, |
|
"text": "(Lin et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 336, |
|
"text": "Lin et al. (2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CommonGen", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We again start with the T5-base model and experiment with masked pre-training on the monolingual target side of CommonGen. As described in \u00a72.3 we compare two strategies of masking where we mask spans of text randomly or specifically mask tokens which correspond to concepts in the training set. Table 4 shows scores for the output generations on the validation set for BLEU, METEOR and ROUGE-L. For fine-tuning we shuffle the concepts in the input before concatenating them into a single sequence. We find in our results that additional pre-training on monolingual data on the target appears to hurt the performance when measured with automatic evaluation metrics. This is true in both the cases when masking is done randomly or when only specific concepts are masked.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 303, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CommonGen", |
|
"sec_num": "2.4" |
|
}, |
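The input construction for CommonGen can be as simple as the following sketch; the concepts are shuffled before being concatenated, and the space separator is our assumption since the paper does not specify one.

```python
import random

def build_input(concepts, seed=None):
    # Shuffle the concept set so the model does not learn to depend on a
    # fixed concept order, then concatenate into a single input sequence.
    rng = random.Random(seed)
    concepts = list(concepts)
    rng.shuffle(concepts)
    return " ".join(concepts)

print(build_input(["man", "dog", "throw", "catch"], seed=0))
# A possible target: "A man throws a ball and his dog catches it."
```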
|
{ |
|
"text": "3 Results Table 5 shows results on the validation set, test set and the challenge sets evaluated using GEM metrics 2 . At the time of writing we do not have access to all the references in the test set as well as the challenge sets for DART and CommonGen, hence scores on some subsets are not shown. The evaluation metrics are divided into different categories measuring lexical similarity, semantic equivalence, diversity and system characteristics. Popular metrics such as BLEU (Papineni et al., 2002) , METEOR (Banerjee and Lavie, 2005) and ROUGE-1/2/L (Lin, 2004) are used for lexical similarity, while recently proposed metrics such as BERTScore (Zhang et al., 2020) and BLEURT (Sellam et al., 2020) which rely on sentence embeddings from pre-trained contextualised embedding models such as BERT (Devlin et al., 2019) and RoBERTa (Liu et al., 2019) are used for evaluating semantic equivalence. To account for the diverse outputs, Shannon Entropy (Shannon et al., 1950) is calculated over unigrams and bigrams (H 1 , H 2 ) along with the mean segmented type token ratio over segment lengths of 100 (MSTTR) (Johnson, 1944) . Furthermore, the ratio of distinct n-grams over the total number of n-grams (Distinct 1,2 ), and the count of n-grams that appear once across the entire test output (Unique 1,2 ) is calculated (Li et al., 2018) . The size of the output vocabulary (|V|) and the mean length of the generated output texts are reported as system characteristics (Sun et al., 2019) . Compared to the baselines described in the GEM benchmark (Gehrmann et al., 2021) , we observe higher scores in our submissions for automatic metrics on the CommonGen and DART datasets while scoring lower on the cleaned E2E and WebNLG (en) datasets especially on the test and challenge subsets for both E2E and WebNLG.", |
|
"cite_spans": [ |
|
{ |
|
"start": 480, |
|
"end": 503, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 539, |
|
"text": "(Banerjee and Lavie, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 567, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 671, |
|
"text": "(Zhang et al., 2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 704, |
|
"text": "(Sellam et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 801, |
|
"end": 822, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 853, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 952, |
|
"end": 974, |
|
"text": "(Shannon et al., 1950)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1111, |
|
"end": 1126, |
|
"text": "(Johnson, 1944)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1322, |
|
"end": 1339, |
|
"text": "(Li et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1471, |
|
"end": 1489, |
|
"text": "(Sun et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1549, |
|
"end": 1572, |
|
"text": "(Gehrmann et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 17, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CommonGen", |
|
"sec_num": "2.4" |
|
}, |
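The GEM-metrics implementation linked in the footnote is the authoritative one; as a rough sketch of what the diversity measures compute, simplified versions of distinct-n, n-gram Shannon entropy and MSTTR over a tokenised system output could look as follows.

```python
import math
from collections import Counter

def ngrams(tokens, n):
    return [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

def distinct_n(tokens, n):
    # Ratio of distinct n-grams over the total number of n-grams.
    grams = ngrams(tokens, n)
    return len(set(grams)) / len(grams)

def shannon_entropy(tokens, n):
    # Shannon entropy (in bits) of the n-gram distribution.
    counts = Counter(ngrams(tokens, n))
    total = sum(counts.values())
    return -sum(c / total * math.log2(c / total) for c in counts.values())

def msttr(tokens, segment=100):
    # Mean segmented type-token ratio over fixed-length segments;
    # incomplete trailing segments are dropped.
    segs = [tokens[i:i + segment] for i in range(0, len(tokens), segment)]
    segs = [s for s in segs if len(s) == segment]
    if not segs:
        return float("nan")
    return sum(len(set(s)) / len(s) for s in segs) / len(segs)

tokens = "the cat sat on the mat and the dog sat on the rug".split()
print(distinct_n(tokens, 1), round(shannon_entropy(tokens, 1), 3))
```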
|
{ |
|
"text": "We presented a description of the system submitted by NUIG-DSI to the GEM benchmark 2021. We participated in the modeling shared task and submitted outputs on four datasets for data-to-text generation including DART, WebNLG (en), E2E and CommonGen using the T5-base model. We first trained this model with monolingual data from DBpedia abstracts and target side references before fine-tuning on respective training datasets. Additionally we experimented with various masking strategies focusing specifically on masking entities, predicates and concepts as well as a random masking strategy for training. We found random masking to perform the best and submit our final outputs using this approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://github.com/GEM-benchmark/ GEM-metrics", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Dbpedia: A nucleus for a web of open data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "The semantic web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with im- proved correlation with human judgments. In Pro- ceedings of the ACL Workshop on Intrinsic and Ex- trinsic Evaluation Measures for Machine Transla- tion and/or Summarization, pages 65-72, Ann Ar- bor, Michigan. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The 2020 bilingual, bi-directional WebNLG+ shared task: Overview and evaluation results", |
|
"authors": [ |
|
{

"first": "Thiago",

"middle": [],

"last": "Castro Ferreira",

"suffix": ""

},

{

"first": "Claire",

"middle": [],

"last": "Gardent",

"suffix": ""

},

{

"first": "Nikolai",

"middle": [],

"last": "Ilinykh",

"suffix": ""

},

{

"first": "Chris",

"middle": [],

"last": "van der Lee",

"suffix": ""

},

{

"first": "Simon",

"middle": [],

"last": "Mille",

"suffix": ""

},

{

"first": "Diego",

"middle": [],

"last": "Moussallem",

"suffix": ""

},

{

"first": "Anastasia",

"middle": [],

"last": "Shimorina",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thiago Castro Ferreira, Claire Gardent, Nikolai Ilinykh, Chris van der Lee, Simon Mille, Diego Moussallem, and Anastasia Shimorina. 2020. The 2020 bilingual, bi-directional WebNLG+ shared task: Overview and evaluation results (WebNLG+ 2020). In Proceedings of the 3rd International Work- shop on Natural Language Generation from the Se- mantic Web (WebNLG+), pages 55-76, Dublin, Ire- land (Virtual). Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Semantic noise matters for neural natural language generation", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Du\u0161ek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Howcroft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verena", |
|
"middle": [], |
|
"last": "Rieser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "421--426", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-8652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Du\u0161ek, David M. Howcroft, and Verena Rieser. 2019. Semantic noise matters for neural natural lan- guage generation. In Proceedings of the 12th Inter- national Conference on Natural Language Genera- tion, pages 421-426, Tokyo, Japan. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The WebNLG challenge: Generating text from RDF data", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "124--133", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3518" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. The WebNLG challenge: Generating text from RDF data. In Pro- ceedings of the 10th International Conference on Natural Language Generation, pages 124-133, San- tiago de Compostela, Spain. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Survey of the State of the Art in Natural Language Generation: Core tasks, applications and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "61", |
|
"issue": "", |
|
"pages": "65--170", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1613/jair.5477" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Gatt and Emiel Krahmer. 2018. Survey of the State of the Art in Natural Language Generation: Core tasks, applications and evaluation. Journal of Artificial Intelligence Research, 61:65-170.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The gem benchmark: Natural language generation, its evaluation and metrics", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tosin", |
|
"middle": [], |
|
"last": "Adewumi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karmanya", |
|
"middle": [], |
|
"last": "Aggarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Sasanka Ammanamanchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aremu", |
|
"middle": [], |
|
"last": "Anuoluwapo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{

"first": "Khyathi Raghavi",

"middle": [],

"last": "Chandu",

"suffix": ""

},

{

"first": "Miruna",

"middle": [],

"last": "Clinciu",

"suffix": ""

},

{

"first": "Dipanjan",

"middle": [],

"last": "Das",

"suffix": ""

},

{

"first": "Kaustubh",

"middle": [

"D"

],

"last": "Dhole",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2102.01672" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Gehrmann, Tosin Adewumi, Karmanya Ag- garwal, Pawan Sasanka Ammanamanchi, Aremu Anuoluwapo, Antoine Bosselut, Khyathi Raghavi Chandu, Miruna Clinciu, Dipanjan Das, Kaustubh D Dhole, et al. 2021. The gem benchmark: Natu- ral language generation, its evaluation and metrics. arXiv preprint arXiv:2102.01672.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Studies in language behavior: A program of research", |
|
"authors": [ |
|
{ |
|
"first": "Wendell", |
|
"middle": [ |
|
"Johnson" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1944, |
|
"venue": "Psychological Monographs", |
|
"volume": "56", |
|
"issue": "2", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wendell Johnson. 1944. Studies in language behavior: A program of research. Psychological Monographs, 56(2):1-15.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bart: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal ; Abdelrahman Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Visual question generation as dual task of visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Yikang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bolei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanli", |
|
"middle": [], |
|
"last": "Ouyang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaogang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6116--6124", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2018.00640" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yikang Li, Nan Duan, Bolei Zhou, Xiao Chu, Wanli Ouyang, Xiaogang Wang, and Ming Zhou. 2018. Visual question generation as dual task of visual question answering. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6116-6124.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "CommonGen: A constrained text generation challenge for generative commonsense reasoning", |
|
"authors": [ |
|
{

"first": "Bill Yuchen",

"middle": [],

"last": "Lin",

"suffix": ""

},

{

"first": "Wangchunshu",

"middle": [],

"last": "Zhou",

"suffix": ""

},

{

"first": "Ming",

"middle": [],

"last": "Shen",

"suffix": ""

},

{

"first": "Pei",

"middle": [],

"last": "Zhou",

"suffix": ""

},

{

"first": "Chandra",

"middle": [],

"last": "Bhagavatula",

"suffix": ""

},

{

"first": "Yejin",

"middle": [],

"last": "Choi",

"suffix": ""

},

{

"first": "Xiang",

"middle": [],

"last": "Ren",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1823--1840", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.165" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bill Yuchen Lin, Wangchunshu Zhou, Ming Shen, Pei Zhou, Chandra Bhagavatula, Yejin Choi, and Xiang Ren. 2020. CommonGen: A constrained text gen- eration challenge for generative commonsense rea- soning. In Findings of the Association for Computa- tional Linguistics: EMNLP 2020, pages 1823-1840, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "ROUGE: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text Summarization Branches Out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Dart: Opendomain structured data record to text generation", |
|
"authors": [ |
|
{ |
|
"first": "Linyong", |
|
"middle": [], |
|
"last": "Nan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amrit", |
|
"middle": [], |
|
"last": "Rau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinand", |
|
"middle": [], |
|
"last": "Sivaprasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chiachun", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangru", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aadit", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neha", |
|
"middle": [], |
|
"last": "Verma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangxiaokang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadia", |
|
"middle": [], |
|
"last": "Irwanto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faiaz", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmad", |
|
"middle": [], |
|
"last": "Zaidi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murori", |
|
"middle": [], |
|
"last": "Mutuma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.02871" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linyong Nan, Dragomir Radev, Rui Zhang, Amrit Rau, Abhinand Sivaprasad, Chiachun Hsieh, Xian- gru Tang, Aadit Vyas, Neha Verma, Pranav Kr- ishna, Yangxiaokang Liu, Nadia Irwanto, Jessica Pan, Faiaz Rahman, Ahmad Zaidi, Murori Mutuma, Yasin Tarabar, Ankit Gupta, Tao Yu, Yi Chern Tan, Xi Victoria Lin, Caiming Xiong, Richard Socher, and Nazneen Fatema Rajani. 2021. Dart: Open- domain structured data record to text generation. arXiv preprint arXiv:2007.02871.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The E2E dataset: New challenges for endto-end generation", |
|
"authors": [ |
|
{ |
|
"first": "Jekaterina", |
|
"middle": [], |
|
"last": "Novikova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Du\u0161ek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verena", |
|
"middle": [], |
|
"last": "Rieser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "201--206", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5525" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, and Verena Rieser. 2017. The E2E dataset: New challenges for end- to-end generation. In Proceedings of the 18th An- nual SIGdial Meeting on Discourse and Dialogue, pages 201-206, Saarbr\u00fccken, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bleu: A method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, ACL '02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: A method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Computa- tional Linguistics, ACL '02, page 311-318, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "NUIG-DSI at the WebNLG+ challenge: Leveraging transfer learning for RDF-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Nivranshu", |
|
"middle": [], |
|
"last": "Pasricha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "137--143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nivranshu Pasricha, Mihael Arcan, and Paul Buite- laar. 2020. NUIG-DSI at the WebNLG+ chal- lenge: Leveraging transfer learning for RDF-to-text generation. In Proceedings of the 3rd Interna- tional Workshop on Natural Language Generation from the Semantic Web (WebNLG+), pages 137-143, Dublin, Ireland (Virtual). Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to- text transformer. Journal of Machine Learning Re- search, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Building Natural Language Generation Systems, 1 edition", |
|
"authors": [ |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/CBO9780511519857" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehud Reiter and Robert Dale. 2000. Building Natu- ral Language Generation Systems, 1 edition. Cam- bridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BLEURT: Learning robust metrics for text generation", |
|
"authors": [ |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Sellam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7881--7892", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.704" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thibault Sellam, Dipanjan Das, and Ankur Parikh. 2020. BLEURT: Learning robust metrics for text generation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7881-7892, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The mathematical theory of communication", |
|
"authors": [ |
|
{ |
|
"first": "Claude", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Shannon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Warren", |
|
"middle": [], |
|
"last": "Weaver", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norbert", |
|
"middle": [], |
|
"last": "Wiener", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1950, |
|
"venue": "Physics Today", |
|
"volume": "3", |
|
"issue": "9", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claude E Shannon, Warren Weaver, and Norbert Wiener. 1950. The mathematical theory of commu- nication. Physics Today, 3(9):31.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "How to compare summarizers without target length? pitfalls, solutions and re-examination of the neural summarization literature", |
|
"authors": [ |
|
{ |
|
"first": "Simeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ori", |
|
"middle": [], |
|
"last": "Shapira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--29", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-2303" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simeng Sun, Ori Shapira, Ido Dagan, and Ani Nenkova. 2019. How to compare summarizers without target length? pitfalls, solutions and re-examination of the neural summarization literature. In Proceedings of the Workshop on Methods for Optimizing and Eval- uating Neural Language Generation, pages 21-29, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{

"first": "Patrick",

"middle": [],

"last": "von Platen",

"suffix": ""

},

{

"first": "Clara",

"middle": [],

"last": "Ma",

"suffix": ""

},

{

"first": "Yacine",

"middle": [],

"last": "Jernite",

"suffix": ""

},

{

"first": "Julien",

"middle": [],

"last": "Plu",

"suffix": ""

},

{

"first": "Canwen",

"middle": [],

"last": "Xu",

"suffix": ""

},

{

"first": "Teven",

"middle": [

"Le"

],

"last": "Scao",

"suffix": ""

},

{

"first": "Sylvain",

"middle": [],

"last": "Gugger",

"suffix": ""

},

{

"first": "Mariama",

"middle": [],

"last": "Drame",

"suffix": ""

},

{

"first": "Quentin",

"middle": [],

"last": "Lhoest",

"suffix": ""

},

{

"first": "Alexander",

"middle": [],

"last": "Rush",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-demos.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations, Addis Ababa", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Evaluating text generation with bert. In Interna- tional Conference on Learning Representations, Ad- dis Ababa, Ethiopia.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Example of a tripleset from the DART dataset with additional information tags included after linearisation for fine-tuning (top) and different masking strategies applied to a sentence for pre-training (bottom).", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>: Results from automatic evaluation on the</td></tr><tr><td>DART validation set with different masking strategies</td></tr><tr><td>on DBpedia abstracts for pre-training using the T5-</td></tr><tr><td>small model.</td></tr></table>", |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Results from automatic evaluation metrics measuring lexical similarity, semantic equivalence, diversity and system characteristics on the validation set, test set and the three challenge sets -sample, scramble and numbers for DART, WebNLG (en), E2E and CommonGen." |
|
} |
|
} |
|
} |
|
} |