|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:31:18.196370Z" |
|
}, |
|
"title": "DialDoc 2021 Shared Task: Goal-Oriented Document-grounded Dialogue Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research AI", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present the results of Shared Task at Workshop DialDoc 2021 that is focused on document-grounded dialogue and conversational question answering. The primary goal of this Shared Task is to build goal-oriented information-seeking conversation systems that can identify the most relevant knowledge in the associated document for generating agent responses in natural language. It includes two subtasks on predicting agent responses: the first subtask is to predict the grounding text span in the given document for next agent response; the second subtask is to generate agent response in natural language given the context. Many submissions outperform baseline significantly. For the first task, the best-performing system achieved 67.1 Exact Match and 76.3 F1. For the second subtask, the best system achieved 41.1 SacreBLEU and highest rank by human evaluation.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present the results of Shared Task at Workshop DialDoc 2021 that is focused on document-grounded dialogue and conversational question answering. The primary goal of this Shared Task is to build goal-oriented information-seeking conversation systems that can identify the most relevant knowledge in the associated document for generating agent responses in natural language. It includes two subtasks on predicting agent responses: the first subtask is to predict the grounding text span in the given document for next agent response; the second subtask is to generate agent response in natural language given the context. Many submissions outperform baseline significantly. For the first task, the best-performing system achieved 67.1 Exact Match and 76.3 F1. For the second subtask, the best system achieved 41.1 SacreBLEU and highest rank by human evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Goal-oriented conversational systems could assist end users to query information in documents dynamically via natural language interactions. Meanwhile, there is a vast number of documents in which individuals and organizations choose to present their interests and knowledge to the world for broad applications. Thus, it attracts a lot of attentions from researchers and practitioners from different fields. There have been significant individual research threads that show promises in handling heterogeneous knowledge embedded in the documents (Talmor et al., 2021) , including (1) unstructured content such as text passages (CoQA (Reddy et al., 2019) , QuAC (Choi et al., 2018) , ShARC (Saeidi et al., 2018) , DoQA (Campos et al., 2020) , Doc2Dial (Feng et al., 2020) ); (2) semi-structured content such as tables or lists (SQA (Iyyer et al., 2017) , HybridQA (Chen et al., 2020) ); (3) mul-timedia such as images and videos with associated textual descriptions (RecipeQA (Yagcioglu et al., 2018) , PsTuts-VQA (Colas et al., 2020) , MI-MOQA (Singh et al., 2021) ) Despite these recent advances, the challenge remains for handling multiturn queries of complex dialogue scenarios (Ma et al., 2020; Feng et al., 2020) and then respond based on the most relevant content in documents of various types from wide domains. As a step forward, we propose a shared task and competition to invite researchers to bring their individual perspectives and advance the field in joint effort.", |
|
"cite_spans": [ |
|
{ |
|
"start": 545, |
|
"end": 566, |
|
"text": "(Talmor et al., 2021)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 652, |
|
"text": "(Reddy et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 679, |
|
"text": "(Choi et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 688, |
|
"end": 709, |
|
"text": "(Saeidi et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 738, |
|
"text": "DoQA (Campos et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 769, |
|
"text": "(Feng et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 830, |
|
"end": 850, |
|
"text": "(Iyyer et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 862, |
|
"end": 881, |
|
"text": "(Chen et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 974, |
|
"end": 998, |
|
"text": "(Yagcioglu et al., 2018)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1012, |
|
"end": 1032, |
|
"text": "(Colas et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1043, |
|
"end": 1063, |
|
"text": "(Singh et al., 2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1180, |
|
"end": 1197, |
|
"text": "(Ma et al., 2020;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1198, |
|
"end": 1216, |
|
"text": "Feng et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We introduce DialDoc 2021 Shared Task, which focuses on building goal-oriented informationseeking dialogue that are grounded in textual content. In particular, the goal is to develop a dialogue system to comprehend multi-turn queries and identify the most relevant knowledge in the associated document for generating agent responses in natural language. It includes two subtasks for predicting agent response. The first subtask (Subtask 1) is to predict the grounding text span in the given document for next agent response; the second subtask (Subtask 2) is to generate agent response in natural language given the contexts. The dataset used for the task is a goal-oriented document-grounded dialogue dataset Doc2Dial (Feng et al., 2020) . We hosted the leaderboards for Dev-Test and Test phase on eval.ai for two subtasks respectively. There are a total of 23 teams that participated Dev-Test phase. For final test phrase, 11 teams submitted to the leaderboard of Subtask 1, and 9 teams submitted to the leaderboard of Subtask 2. For the first task, the best system achieved 67.09 Exact Match and 76.34 F1. For the second subtask, the best system achieved 41.06 sacrebleu and rank the best by human evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 719, |
|
"end": 738, |
|
"text": "(Feng et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we first describe the dataset and the two subtasks. Then, we provide a summary of the evaluation results from participating systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use Doc2Dial dataset 1 introduced in Feng et al. (2020), which contains 4793 goal-oriented dialogues and a total of 488 associated grounding documents from four domains for social welfare: dmv, va, ssa, and studentaid. In this dataset, dialogues contain the scenarios when agent ask follow-up questions for clarification or verification based on dialogue-based and document-based context. Each turn is annotated with (1) grounding span from the associated document, (2) dialogue act, e.g., query, respond and (3) speaker role, either agent or user.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For developing models, we divide the data into training, validation and test split based on the number of dialogues. For evaluating the models, we provide a dev-test set which contains about 30% test dataset. The final test set also includes dialogue and document data from an unseen domain cdccovid that is not in the training, validation or dev-test set. The dialogues of unseen domain were collected in the same data collection process as published Doc2Dial dataset. Table 1 presents the number of dialogues ('dials'), total turns ('turns') of all dialogues and total turns for prediction ('predicts') in each data split.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 477, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "This Shared Task focuses on building goal-oriented information-seeking dialogue systems. The goal is to teach a dialogue system to identify the most relevant knowledge in the associated document for generating agent responses in natural language. It includes two subtasks on predicting agent response. The agent can either provide an answer or ask follow-up question. Here we only consider the cases that use queries are answerable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This subtask is to predict the grounding span of next agent response. The input current turn, dialogue history and one associated document; the output is a text span. The evaluation is based on token-level F1 and exact match score (Rajpurkar et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 255, |
|
"text": "(Rajpurkar et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This subtask is to generate the next agent utterance. The input is current turn, dialogue history and the document context; the output is utterance in natural language. The evaluation is based on SacreBLEU (Post, 2018) . We also perform human evaluation on the top three submissions with highest SacreBLEU for determining the final rank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 218, |
|
"text": "(Post, 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Human evaluation We ask human annotators to rank a group of three utterances from the three submissions based on relevance and fluency given document context and dialogue history. relevance is used to measure how well the generated utterance is relevant to grounding span as a response to the previous dialogue turn(s). fluency indicates whether the generated utterance is grammatically correct and generally fluent in English. We randomly select 100 generated turns where the utterances are not all the same. We collect five judgements per group.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Subtask 1 The baseline model for Subtask 1 is based on BERT-QA . For each token, it computes the probabilities of start and end positions by a linear projection from the last hidden layers of the BERT model. Then it multiplies the scores of the start and end positions for estimating the probability of the corresponding span. As a baseline, we fine-tune BERT-base on Doc2Dial dataset where the input is dialogue query and the associated document context. The dialogue query is the concatenation of dialogue turns in reverse order.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The task is formulated as an end-toend text generation task. The baseline approach for Subtask 2 is based on sequence-to-sequence model BART by (Lewis et al., 2020) . We fine-tune the pre-trained BART model (bart-cnn-large) on Doc2Dial dataset. The source input consists of current turn, dialogue history along with document title and content that are separated by special tokens. The target output is next agent utterance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We hosted the leaderboards 2 for Dev-Test and Test phase for the two subtasks on eval.ai. The Dev-Test and Test phase lasted three months and one week respectively. There are a total of 23 teams that participated Dev-Test phase. For final Test phrase, 11 teams submitted to the leaderboard of Subtask 1, and 9 teams submitted to the leaderboard of Subtask 2. Among the best-performing systems, some teams utilize additional data for augmentation for pre-training (e.g., CAiRE (Xu et al., 2021) , SCIR-DT (Li et al., 2021) ), some teams employ neural retrievers for obtaining most relevant document passages (e.g., RWTH (Daheim et al., 2021) and ER). For the first task, the best system achieved 67.1 Exact Match and 76.3 F1. For the second subtask, the best system achieved 41.1 sacrebleu and rank the best by human evaluation. Next, we provide a brief summary of the work by 8 teams as listed in Table 2 , who submitted their technical system papers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 476, |
|
"end": 493, |
|
"text": "(Xu et al., 2021)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 521, |
|
"text": "(Li et al., 2021)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 640, |
|
"text": "(Daheim et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 897, |
|
"end": 904, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Shared Task Submissions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "ER 3 participates Subtask 1. It introduces a model that leverages the structure in grounding document and dialogue context. It applies a multi-passage reader model based on transformer-based encoder to encode each passage concatenated with dialogue context and document title. It optimizes both passage selection, start and end position selection with gold knowledge passage during training. The final submission is an ensemble of 12 models and achieves the best results for Subtask 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ER", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "SCIR-DT (Li et al., 2021) participates Subtask 1. Their methods include data augmentation, model pretraining/fine-tuning, postprocessing, and ensemble. For data augmentation, they use backtranslation and synonym substitution to obtain 5 times of document and dialogue data, which are then paired into 25 times data. They use the augmented data for pretraining BERT and RoBERTa with whole word masking technique and doc2dial data for fine-tuning BERT, RobERTa and ELEC-TRA. The ensemble method selects the most probably rank span based on the linear combination of ranking results per model and learn the hyperpa-2 https://eval.ai/web/challenges/ challenge-page/793/overview 3 The submission is non-archival. rameter for inference. The team ranks 2nd based on the average of normalized F1 and EM scores used for the final evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 25, |
|
"text": "(Li et al., 2021)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SCIR-DT", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "KU NLP (Kim et al., 2021) participates both tasks. For Subtask 1, they adopt pretrained RoBERTa as backbone and predict dialogue act and span jointly. For Subtask 2, they include several tokens and embeddings based on document structure into input representation for BART. Instead of random order of the training instances, they propose to apply curriculum learning (Xu et al., 2020) based on the computed task difficulty level for each task respectively. The final submission on Subtask 2 is based on the span prediction by a single model. It achieves best SacreBLEU and human evaluation results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 25, |
|
"text": "(Kim et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 383, |
|
"text": "(Xu et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "KU NLP", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "RWTH (Daheim et al., 2021) participates both tasks. For Subtask 1, it applies BERTQA with additional span-based specifics in their approach. First, they restrict start and end position only to the begin and end of sub-clauses since Doc2Dial dataset is based on preprocessed spans. In addition, they consider modeling the joint probability of a span inspired by Fajcik et al. (2020) . The final submission is the ensemble of multiple models, where the probability of a span is obtained by marginalizing the joint probability of span and model over all models. For Subtask 2, they propose to cascade over all spans where they use top N (=5) spans as a approximation. The probability is computed jointly. The generation model is trained with cross-entropy using an n-best list obtained from the separately trained selection model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 26, |
|
"text": "(Daheim et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 381, |
|
"text": "Fajcik et al. (2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RWTH", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "CAiRE (Xu et al., 2021) participates both tasks. They utilize data augmentation methods and several training techniques. For the first task, it uses QA data such as MRQA shared task dataset (Fisch et al., 2019) and conversational QA data such as CoQA (Reddy et al., 2019) for pretraining RoBERTa with multi-task learning strategy and the models are fine-tuned on Doc2Dial dataset. For the second task, they pretrain BART on Wizard-of-Wikipedia dataset (Dinan et al., 2019) . Then they fine-tune the model using the knowledge prediction results from the first task. The final submission is based on the ensemble of multiple models where the best span is determined by the majority vote by models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 23, |
|
"text": "(Xu et al., 2021)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "(Fisch et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 271, |
|
"text": "(Reddy et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 472, |
|
"text": "(Dinan et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CAiRE", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "SB NITK (Bachina et al., 2021) participates Subtask 1. They also adapt data augmentation approaches that utilize additional Question Answering dataset such as SQuAD 2.0 (Lee et al., 2020) , Natural Questions (Kwiatkowski et al., 2019) and CORD-19 (Wang et al., 2020) for pretraining several models including RoBERTa, ALBERT and ELECTRA. Then they experiment with different combinations of ensemble models. The final submission is based on the ensemble of ensemble AL-BERTA and RoBERTa using all three additional datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 30, |
|
"text": "(Bachina et al., 2021)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 187, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 234, |
|
"text": "(Kwiatkowski et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 266, |
|
"text": "CORD-19 (Wang et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SB NITK", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "JARS (Khosla et al., 2021) participates in Subtask 1. It also uses transformer-based QA models, for which it pretrains on different Question Answering datasets such as SQuAD, different subsets of MRQA-2019 training set along with conversational QA data such as CoQA and QuAC. The experiments suggest that conversational QA datasets are more helpful comparing to QA datasets. They compare three different ensemble methods and use the highest average probability score for span prediction based on multiple models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 26, |
|
"text": "(Khosla et al., 2021)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "JARS", |
|
"sec_num": "5.7" |
|
}, |
|
{ |
|
"text": "Schlussstein (Chen et al., 2021) submit to both subtasks. For Subtask 1, they pretrain BERT on datasets such as SQuAD and CoQA before finetuning on Doc2Dial. To incorporate longer document content in Doc2Dial dataset, they also experiment with longer document stride and observe per- formance improvement. For Subtask 2, it pretrains BART model on CoQA dataset before fine-tuning it on Doc2Dial dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 32, |
|
"text": "(Chen et al., 2021)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Schlussstein", |
|
"sec_num": "5.8" |
|
}, |
|
{ |
|
"text": "Subtask 1 We present the evaluation results on final Test phase of Subtask1 from 7 participating teams in Table 3 . The submissions are ordered based on the average of normalized F1 and EM scores. All submissions of Test Phase outperform BERT-base baseline by large margin. The scores in parentheses are by single models. All other results except the ones by KU NLP are based on various ensemble methods, which further improve the performances significantly in most cases. Table 4 presents the evaluation results on final test set of Subtask 2 from 4 participating teams. We performance human evaluations on the top three submissions based on SacreBLEU scores. We use three different ways to compute majority vote to get the aggregated results: (1) we consider the rank if it is agreed among at least three annotators;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 113, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 480, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "(2) we consider the rank if it is agreed among at least two annotators;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(3) we also use the aggregation results provided by Appen platform, which takes consideration of annotator's historical performances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We presented the results of 1st DialDoc 2021 Shared Task, which included two subtasks on document-grounded goal-oriented dialogue modeling. We received submissions from a total of 17 teams during entire phase for Subtask 1, and 9 teams for Subtask 2. All submissions during final Test phase outperformed baselines by a large margin for both subtasks. By organizing this shared task, we hope to invite researchers and practitioners to bring their individual perspectives on the subject, and to jointly advance the techniques toward building assistive agents to access document content for end users by conversing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://doc2dial.github.io/file/ doc2dial_v1.0.1.zip", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Luis Lastras, Sachindra Joshi, Siva Reddy and Siva Sankalp Patel for a lot of helpful discussions on organizing this shared task. We thank eval.ai for their help and support on hosting the leaderboards on their platform. Finally, we are thankful to IBM Research AI for sponsoring the shared task and competition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Ensemble albert and roberta for span prediction in question answering", |
|
"authors": [ |
|
{ |
|
"first": "Sony", |
|
"middle": [], |
|
"last": "Bachina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spandana", |
|
"middle": [], |
|
"last": "Balumuri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sowmya", |
|
"middle": [], |
|
"last": "Kamath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sony Bachina, Spandana Balumuri, and Sowmya Ka- math S. 2021. Ensemble albert and roberta for span prediction in question answering. In Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "DoQA -accessing domain-specific FAQs via conversational QA", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Ander Campos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Otegi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Soroa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Deriu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Cieliebak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7302--7314", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon Ander Campos, Arantxa Otegi, Aitor Soroa, Jan Deriu, Mark Cieliebak, and Eneko Agirre. 2020. DoQA -accessing domain-specific FAQs via con- versational QA. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, pages 7302-7314, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "HybridQA: A dataset of multi-hop question answering over tabular and textual data", |
|
"authors": [ |
|
{ |
|
"first": "Wenhu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanwen", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhan", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1026--1036", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.91" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenhu Chen, Hanwen Zha, Zhiyu Chen, Wenhan Xiong, Hong Wang, and William Yang Wang. 2020. HybridQA: A dataset of multi-hop question answer- ing over tabular and textual data. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1026-1036, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Building goal-oriented document-grounded dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faner", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yeju", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaixin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xi Chen, Faner Lin, Yeju Zhou, and Kaixin Ma. 2021. Building goal-oriented document-grounded dialogue systems. In Proceedings of the 1st Work- shop on Document-grounded Dialogue and Conver- sational Question Answering. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "QuAC: Question answering in context", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wentau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2174--2184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1241" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen- tau Yih, Yejin Choi, Percy Liang, and Luke Zettle- moyer. 2018. QuAC: Question answering in con- text. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2174-2184, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "TutorialVQA: Question answering dataset for tutorial videos", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Colas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddhesh", |
|
"middle": [], |
|
"last": "Gupte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doo Soon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5450--5455", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony Colas, Seokhwan Kim, Franck Dernoncourt, Siddhesh Gupte, Zhe Wang, and Doo Soon Kim. 2020. TutorialVQA: Question answering dataset for tutorial videos. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 5450-5455, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Cascaded span extraction and response generation for document-grounded dialog", |
|
"authors": [ |
|
{ |
|
"first": "Nico", |
|
"middle": [], |
|
"last": "Daheim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Thulke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Dugast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nico Daheim, David Thulke, Christian Dugast, and Hermann Ney. 2021. Cascaded span extraction and response generation for document-grounded dialog. In Proceedings of the 1st Workshop on Document- grounded Dialogue and Conversational Question Answering. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Wizard of wikipedia: Knowledge-powered conversational agents", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of wikipedia: Knowledge-powered conversational agents. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "BUT-FIT at SemEval-2020 task 5: Automatic detection of counterfactual statements with deep pre-trained language representation models", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Fajcik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Jon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Docekal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Smrz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--444", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Fajcik, Josef Jon, Martin Docekal, and Pavel Smrz. 2020. BUT-FIT at SemEval-2020 task 5: Au- tomatic detection of counterfactual statements with deep pre-trained language representation models. In Proceedings of the Fourteenth Workshop on Seman- tic Evaluation, pages 437-444, Barcelona (online). International Committee for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "doc2dial: A goal-oriented document-grounded dialogue dataset", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chulaka", |
|
"middle": [], |
|
"last": "Gunasekara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sachindra", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Lastras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8118--8128", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Feng, Hui Wan, Chulaka Gunasekara, Siva Patel, Sachindra Joshi, and Luis Lastras. 2020. doc2dial: A goal-oriented document-grounded dia- logue dataset. In Proceedings of the 2020 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 8118-8128, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "MRQA 2019 shared task: Evaluating generalization in reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Workshop on Machine Reading for Question Answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-5801" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Fisch, Alon Talmor, Robin Jia, Minjoon Seo, Eu- nsol Choi, and Danqi Chen. 2019. MRQA 2019 shared task: Evaluating generalization in reading comprehension. In Proceedings of the 2nd Work- shop on Machine Reading for Question Answering, pages 1-13, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Search-based neural structured learning for sequential question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1821--1831", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1167" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, Wen-tau Yih, and Ming-Wei Chang. 2017. Search-based neural structured learning for sequen- tial question answering. In Proceedings of the 55th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 1821-1831, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Team jars: Dialdoc subtask 1 -improved knowledge identification with supervised out-of-domain pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Sopan", |
|
"middle": [], |
|
"last": "Khosla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Lovelace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritam", |
|
"middle": [], |
|
"last": "Dutt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adithya", |
|
"middle": [], |
|
"last": "Pratapa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sopan Khosla, Justin Lovelace, Ritam Dutt, and Adithya Pratapa. 2021. Team jars: Dialdoc subtask 1 -improved knowledge identification with super- vised out-of-domain pretraining. In Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Document-grounded goal-oriented dialogue systems on pre-trained language model with diverse input representation", |
|
"authors": [ |
|
{ |
|
"first": "Boeun", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dohaeng", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harksoo", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sihyung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin-Xia", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oh-Woog", |
|
"middle": [], |
|
"last": "Kwon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Boeun Kim, Dohaeng Lee, Yejin Lee, Harksoo Kim, Sihyung Kim, Jin-Xia Huang, and Oh-Woog Kwon. 2021. Document-grounded goal-oriented dialogue systems on pre-trained language model with diverse input representation. In Proceedings of the 1st Work- shop on Document-grounded Dialogue and Conver- sational Question Answering. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Natural questions: a benchmark for question answering research. Transactions of the Association of Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivia", |
|
"middle": [], |
|
"last": "Redfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Alberti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Epstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Kelcey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Red- field, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Matthew Kelcey, Jacob Devlin, Kenton Lee, Kristina N. Toutanova, Llion Jones, Ming-Wei Chang, Andrew Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natu- ral questions: a benchmark for question answering research. Transactions of the Association of Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "SQuAD2-CR: Semi-supervised annotation for cause and rationales for unanswerability in SQuAD 2.0", |
|
"authors": [ |
|
{ |
|
"first": "Gyeongbok", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seung-won", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyunsouk", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5425--5432", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gyeongbok Lee, Seung-won Hwang, and Hyunsouk Cho. 2020. SQuAD2-CR: Semi-supervised anno- tation for cause and rationales for unanswerability in SQuAD 2.0. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 5425-5432, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Technical report on shared task in dialdoc21", |
|
"authors": [ |
|
{ |
|
"first": "Jiapeng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longxuan", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weinan", |
|
"middle": [], |
|
"last": "Zhangy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiapeng Li, Mingda Li, Longxuan Ma, Weinan Zhangy, and Ting Liu. 2021. Technical report on shared task in dialdoc21. In Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A survey of document grounded dialogue systems (dgds)", |
|
"authors": [ |
|
{ |
|
"first": "Longxuan", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Nan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13818" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Longxuan Ma, Wei-Nan Zhang, Mingda Li, and Ting Liu. 2020. A survey of document grounded dialogue systems (dgds). arXiv preprint arXiv:2004.13818.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A call for clarity in reporting BLEU scores", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "186--191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6319" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Know what you don't know: Unanswerable questions for SQuAD", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "784--789", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2124" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable ques- tions for SQuAD. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 784- 789, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "CoQA: A conversational question answering challenge", |
|
"authors": [ |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "249--266", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00266" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siva Reddy, Danqi Chen, and Christopher D. Manning. 2019. CoQA: A conversational question answering challenge. Transactions of the Association for Com- putational Linguistics, 7:249-266.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Interpretation of natural language rules in conversational machine reading", |
|
"authors": [ |
|
{ |
|
"first": "Marzieh", |
|
"middle": [], |
|
"last": "Saeidi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Bartolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Sheldon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Bouchard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2087--2097", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1233" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marzieh Saeidi, Max Bartolo, Patrick Lewis, Sameer Singh, Tim Rockt\u00e4schel, Mike Sheldon, Guillaume Bouchard, and Sebastian Riedel. 2018. Interpreta- tion of natural language rules in conversational ma- chine reading. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 2087-2097, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "MIMOQA: Multimodal input multimodal output question answering", |
|
"authors": [ |
|
{ |
|
"first": "Hrituraj", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anshul", |
|
"middle": [], |
|
"last": "Nasery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denil", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jatin", |
|
"middle": [], |
|
"last": "Lamba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaji Vasan", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5317--5332", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.naacl-main.418" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hrituraj Singh, Anshul Nasery, Denil Mehta, Aish- warya Agarwal, Jatin Lamba, and Balaji Vasan Srini- vasan. 2021. MIMOQA: Multimodal input multi- modal output question answering. In Proceedings of the 2021 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multimodal{qa}: complex question answering over text, tables and images", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Talmor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ori", |
|
"middle": [], |
|
"last": "Yoran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amnon", |
|
"middle": [], |
|
"last": "Catav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Lahav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akari", |
|
"middle": [], |
|
"last": "Asai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Ilharco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Talmor, Ori Yoran, Amnon Catav, Dan Lahav, Yizhong Wang, Akari Asai, Gabriel Ilharco, Han- naneh Hajishirzi, and Jonathan Berant. 2021. Mul- timodal{qa}: complex question answering over text, tables and images. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "CORD-19: The COVID-19 open research dataset", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [ |
|
"Lu" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoganand", |
|
"middle": [], |
|
"last": "Chandrasekhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russell", |
|
"middle": [], |
|
"last": "Reas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangjiang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Burdick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darrin", |
|
"middle": [], |
|
"last": "Eide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathryn", |
|
"middle": [], |
|
"last": "Funk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Katsis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [ |
|
"Michael" |
|
], |
|
"last": "Kinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Merrill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dewey", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Murdick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devvret", |
|
"middle": [], |
|
"last": "Rishi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jerry", |
|
"middle": [], |
|
"last": "Sheehan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhihong", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020, Online. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucy Lu Wang, Kyle Lo, Yoganand Chandrasekhar, Russell Reas, Jiangjiang Yang, Doug Burdick, Darrin Eide, Kathryn Funk, Yannis Katsis, Rod- ney Michael Kinney, Yunyao Li, Ziyang Liu, William Merrill, Paul Mooney, Dewey A. Murdick, Devvret Rishi, Jerry Sheehan, Zhihong Shen, Bran- don Stilson, Alex D. Wade, Kuansan Wang, Nancy Xin Ru Wang, Christopher Wilhelm, Boya Xie, Dou- glas M. Raymond, Daniel S. Weld, Oren Etzioni, and Sebastian Kohlmeier. 2020. CORD-19: The COVID-19 open research dataset. In Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Curriculum learning for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Benfeng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongtao", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6095--6104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.542" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benfeng Xu, Licheng Zhang, Zhendong Mao, Quan Wang, Hongtao Xie, and Yongdong Zhang. 2020. Curriculum learning for natural language under- standing. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 6095-6104, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Caire in dialdoc21: Data augmentation for information-seeking dialogue system", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etsuko", |
|
"middle": [], |
|
"last": "Ishii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Genta", |
|
"middle": [], |
|
"last": "Indra Winata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaojiang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Madotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yan Xu, Etsuko Ishii, Genta Indra Winata, Zhaojiang Lin, Andrea Madotto, Zihan Liu, Peng Xu, and Pas- cale Fung. 2021. Caire in dialdoc21: Data aug- mentation for information-seeking dialogue system. In Proceedings of the 1st Workshop on Document- grounded Dialogue and Conversational Question Answering. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes", |
|
"authors": [ |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yagcioglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aykut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erkut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Ikizler-Cinbis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1358--1368", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Na- zli Ikizler-Cinbis. 2018. RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1358-1368, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Statistics of dialogue data of different data splits.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Participating teams and affiliations.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "Participating teams of Subtask 1. The rank is based on the average of normalized average of F1 and EM scores.", |
|
"html": null, |
|
"content": "<table><tr><td>Rank</td><td>Team</td><td>SacreBLEU</td></tr><tr><td>1</td><td>KU NLP</td><td>41.1 (41.1)</td></tr><tr><td>2</td><td>RWTH</td><td>40.4 (39.1)</td></tr><tr><td>3</td><td>CAiRE</td><td>37.7 (-)</td></tr><tr><td>4</td><td>SCIR-DT</td><td>30.7 (-)</td></tr><tr><td>-</td><td>baseline</td><td>17.6 (17.6)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "Participating teams and evaluation results on test set of Subtask 2.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |