|
{ |
|
"paper_id": "N03-1007", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:07:11.119143Z" |
|
}, |
|
"title": "An Analysis of Clarification Dialogue for Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "De Boni", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Suresh", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We examine clarification dialogue, a mechanism for refining user questions with follow-up questions, in the context of open domain Question Answering systems. We develop an algorithm for clarification dialogue recognition through the analysis of collected data on clarification dialogues and examine the importance of clarification dialogue recognition for question answering. The algorithm is evaluated and shown to successfully recognize the occurrence of clarification dialogue in the majority of cases and to simplify the task of answer retrieval.", |
|
"pdf_parse": { |
|
"paper_id": "N03-1007", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We examine clarification dialogue, a mechanism for refining user questions with follow-up questions, in the context of open domain Question Answering systems. We develop an algorithm for clarification dialogue recognition through the analysis of collected data on clarification dialogues and examine the importance of clarification dialogue recognition for question answering. The algorithm is evaluated and shown to successfully recognize the occurrence of clarification dialogue in the majority of cases and to simplify the task of answer retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Question Answering Systems aim to determine an answer to a question by searching for a response in a collection of documents (see Voorhees 2002 for an overview of current systems). In order to achieve this (see for example Harabagiu et al. 2002) , systems narrow down the search by using information retrieval techniques to select a subset of documents, or paragraphs within documents, containing keywords from the question and a concept which corresponds to the correct question type (e.g. a question starting with the word \"Who?\" would require an answer containing a person). The exact answer sentence is then sought by either attempting to unify the answer semantically with the question, through some kind of logical transformation (e.g. Moldovan and Rus 2001) or by some form of pattern matching (e.g. Soubbotin 2002; Harabagiu et al. 1999) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 143, |
|
"text": "Voorhees 2002", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 245, |
|
"text": "Harabagiu et al. 2002)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 764, |
|
"text": "Moldovan and Rus 2001)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 822, |
|
"text": "Soubbotin 2002;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 823, |
|
"end": 845, |
|
"text": "Harabagiu et al. 1999)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification dialogues in Question Answering", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Often, though, a single question is not enough to meet user's goals and an elaboration or clarification dialogue is required, i.e. a dialogue with the user which would enable the answering system to refine its understanding of the questioner's needs (for reasons of space we shall not investigate here the difference between elaboration dialogues, clarification dialogues and coherent topical subdialogues and we shall hence refer to this type of dialogue simply as \"clarification dialogue\", noting that this may not be entirely satisfactory from a theoretical linguistic point of view). While a number of researchers have looked at clarification dialogue from a theoretical point of view (e.g. Ginzburg 1998; Ginzburg and Sag 2000; van Beek at al. 1993) , or from the point of view of task oriented dialogue within a narrow domain (e.g. Ardissono and Sestero 1996) , we are not aware of any work on clarification dialogue for open domain question answering systems such as the ones presented at the TREC workshops, apart from the experiments carried out for the (subsequently abandoned) \"context\" task in the TREC-10 QA workshop (Voorhees 2002; Harabagiu et al. 2002 ). Here we seek to partially address this problem by looking at some particular aspect of clarification dialogues in the context of open domain question answering. In particular, we examine the problem of recognizing that a clarification dialogue is occurring, i.e. how to recognize that the current question under consideration is part of a previous series (i.e. clarifying previous questions) or the start of a new series; we then show how the recognition that a clarification dialogue is occurring can simplify the problem of answer retrieval.", |
|
"cite_spans": [ |
|
{ |
|
"start": 695, |
|
"end": 709, |
|
"text": "Ginzburg 1998;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 732, |
|
"text": "Ginzburg and Sag 2000;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 754, |
|
"text": "van Beek at al. 1993)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 865, |
|
"text": "Ardissono and Sestero 1996)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1130, |
|
"end": 1145, |
|
"text": "(Voorhees 2002;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1146, |
|
"end": 1167, |
|
"text": "Harabagiu et al. 2002", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification dialogues in Question Answering", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The TREC-2001 QA track included a \"context\" task which aimed at testing systems' ability to track context through a series of questions (Voorhees 2002) . In other words, systems were required to respond correctly to a kind of clarification dialogue in which a full understanding of questions depended on an understanding of previous questions. In order to test the ability to answer such questions correctly, a total of 42 questions were prepared by NIST staff, divided into 10 series of related question sentences which therefore constituted a type of clarification dialogue; the sentences varied in length between 3 and 8 questions, with an average of 4 questions per dialogue. These clarification dialogues were however presented to the question answering systems already classified and hence systems did not need to recognize that clarification was actually taking place. Consequently systems that simply looked for an answer in the subset of documents retrieved for the first question in a series performed well without any understanding of the fact that the questions constituted a coherent series.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 151, |
|
"text": "(Voorhees 2002)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TREC Context Experiments", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In a more realistic approach, systems would not be informed in advance of the start and end of a series of clarification questions and would not be able to use this information to limit the subset of documents in which an answer is to be sought.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TREC Context Experiments", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We manually analysed the TREC context question collection in order to determine what features could be used to determine the start and end of a question series, with the following conclusions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of the TREC context questions", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Pronouns and possessive adjectives: questions such as \"When was it born?\", which followed \"What was the first transgenic mammal?\", were referring to some previously mentioned object through a pronoun (\"it\"). The use of personal pronouns (\"he\", \"it\", \u2026) and possessive adjectives (\"his\", \"her\",\u2026) which did not have any referent in the question under consideration was therefore considered an indication of a clarification question.. \u2022 Absence of verbs: questions such as \"On what body of water?\" clearly referred to some previous question or answer. \u2022 Repetition of proper nouns: the question series starting with \"What type of vessel was the modern Varyag?\" had a follow-up question \"How long was the Varyag?\", where the repetition of the proper noun indicates that the same subject matter is under investigation. \u2022 Importance of semantic relations: the first question series started with the question \"Which museum in Florence was damaged by a major bomb explosion?\"; follow-up questions included \"How many people were killed?\" and \"How much explosive was used?\", where there is a clear semantic relation between the \"explosion\" of the initial question and the \"killing\" and \"explosive\" of the following questions. Questions belonging to a series were \"about\" the same subject, and this aboutness could be seen in the use of semantically related words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis of the TREC context questions", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "It was therefore speculated that an algorithm which made use of these features would successfully recognize the occurrence of clarification dialogue. Given that the only available data was the collection of \"context\" questions used in TREC-10, it was felt necessary to collect further data in order to test our algorithm rigorously. This was necessary both because of the small number of questions in the TREC data and the fact that there was no guarantee that an algorithm built for this dataset would perform well on \"real\" user questions. A collection of 253 questions was therefore put together by asking potential users to seek information on a particular topic by asking a prototype question answering system a series of questions, with \"cue\" questions derived from the TREC question collection given as starting points for the dialogues. These questions made up 24 clarification dialogues, varying in length from 3 questions to 23, with an average length of 12 questions (the data is available from the main author upon request). The differences between the TREC \"context\" collection and the new collection are summarized in the following The questions were recorded and manually tagged to recognize the occurrence of clarification dialogue. The questions thus collected were then fed into a system implementing the algorithm, with no indication as to where a clarification dialogue occurred. The system then attempted to recognize the occurrence of a clarification dialogue. Finally the results given by the system were compared to the manually recognized clarification dialogue tags. In particular the algorithm was evaluated for its capacity to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments in Clarification Dialogue Recognition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 recognize a new series of questions (i.e. to tell that the current question is not a clarification of any previous question) (indicated by New in the results table)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments in Clarification Dialogue Recognition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 recognize that the current question is clarifying a previous question (indicated by Clarification in the table)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments in Clarification Dialogue Recognition", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our approach to clarification dialogue recognition looks at certain features of the question currently under consideration (e.g. pronouns and proper nouns) and compares the meaning of the current question with the meanings of previous questions to determine whether they are \"about\" the same matter. Given a question q 0 and n previously asked questions q -1 ..q -n we have a function Clarification_Question which is true if a question is considered a clarification of a previously asked question. In the light of empirical work such as (Ginzburg 1998) , which indicates that questioners do not usually refer back to questions which are very distant, we only considered the set of the previously mentioned 10 questions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 537, |
|
"end": 552, |
|
"text": "(Ginzburg 1998)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification Recognition Algorithm", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "A question is deemed to be a clarification of a previous question if: 1. There are direct references to nouns mentioned in the previous n questions through the use of pronouns (he, she, it, \u2026) or possessive adjectives (his, her, its\u2026) which have no references in the current question. 2. The question does not contain any verbs 3. There are explicit references to proper and common nouns mentioned in the previous n questions, i.e. repetitions which refer to an identical object; or there is a strong sentence similarity between the current question and the previously asked questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification Recognition Algorithm", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In other words:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification Recognition Algorithm", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Clarification_Question (q n ,q -1 ..q -n ) is true if", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification Recognition Algorithm", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1. q 0 has pronoun and possessive adjective references to q -1 ..q -n 2. q 0 does not contain any verbs 3. q 0 has repetition of common or proper nouns in q -1 ..q -n or q 0 has a strong semantic similarity to some q \u2208 q -1 ..q -n", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clarification Recognition Algorithm", |
|
"sec_num": "5" |
|
}, |
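A minimal sketch of this three-part test, assuming Python with NLTK (plus its 'punkt' tokenizer and POS-tagger data). The referent check of point 1 is simplified to the mere presence of a pronoun or possessive, SIM_THRESHOLD is an illustrative tuning constant not given in the paper, and sentence_similarity stands in for the WordNet-based metric of the next section:

```python
# Illustrative sketch only, not the authors' implementation.
import nltk  # assumes NLTK with 'punkt' and 'averaged_perceptron_tagger' installed

PRONOUN_TAGS = {"PRP", "PRP$"}   # personal pronouns and possessives (he, it, his, ...)
SIM_THRESHOLD = 0.5              # hypothetical threshold for point 3

def clarification_question(q0, history, sentence_similarity):
    """Return True if q0 looks like a clarification of a previous question.

    history: previously asked questions, most recent first (at most 10 are used);
    sentence_similarity: callable (s1, s2) -> float, e.g. the metric of section 6.
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(q0))
    # 1. Pronoun or possessive adjective (referent check simplified here).
    if any(tag in PRONOUN_TAGS for _, tag in tagged):
        return True
    # 2. No verbs at all, as in "On what body of water?".
    if not any(tag.startswith("VB") for _, tag in tagged):
        return True
    # 3. Repeated common/proper nouns, or strong semantic similarity, w.r.t. q-1..q-n.
    nouns = {w.lower() for w, tag in tagged if tag.startswith("NN")}
    for q in history[:10]:
        prev = nltk.pos_tag(nltk.word_tokenize(q))
        prev_nouns = {w.lower() for w, tag in prev if tag.startswith("NN")}
        if nouns & prev_nouns or sentence_similarity(q0, q) > SIM_THRESHOLD:
            return True
    return False
```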
|
{ |
|
"text": "A major part of our clarification dialogue recognition algorithm is the sentence similarity metric which looks at the similarity in meaning between the current question and previous questions. WordNet (Miller 1999; Fellbaum 1998 ), a lexical database which organizes words into synsets, sets of synonymous words, and specifies a number of relationships such as hypernym, synonym, meronym which can exist between the synsets in the lexicon, has been shown to be fruitful in the calculation of semantic similarity. One approach has been to determine similarity by calculating the length of the path or relations connecting the words which constitute sentences (see for example Green 1997 and St-Onge 1998) ; different approaches have been proposed (for an evaluation see (Budanitsky and Hirst 2001) ), either using all WordNet relations (Budanitsky and Hirst 2001) or only is-a relations (Resnik 1995; Jiang and Conrath 1997; Mihalcea and Moldvoan 1999) . Miller (1999) , Harabagiu et al. (2002) and De Boni and Manandhar (2002) found WordNet glosses, considered as micro-contexts, to be useful in determining conceptual similarity. (Lee et al. 2002) have applied conceptual similarity to the Question Answering task, giving an answer A a score dependent on the number of matching terms in A and the question. Our sentence similarity measure followed on these ideas, adding to the use of WordNet relations, part-ofspeech information, compound noun and word frequency information. In particular, sentence similarity was considered as a function which took as arguments a sentence s 1 and a second sentence s 2 and returned a value representing the semantic relevance of s 1 in respect of s 2 in the context of knowledge B, i.e.", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 214, |
|
"text": "(Miller 1999;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 228, |
|
"text": "Fellbaum 1998", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 689, |
|
"text": "Green 1997 and", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 703, |
|
"text": "St-Onge 1998)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 796, |
|
"text": "(Budanitsky and Hirst 2001)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 862, |
|
"text": "(Budanitsky and Hirst 2001)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 886, |
|
"end": 899, |
|
"text": "(Resnik 1995;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 900, |
|
"end": 923, |
|
"text": "Jiang and Conrath 1997;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 951, |
|
"text": "Mihalcea and Moldvoan 1999)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 954, |
|
"end": 967, |
|
"text": "Miller (1999)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 993, |
|
"text": "Harabagiu et al. (2002)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 998, |
|
"end": 1026, |
|
"text": "De Boni and Manandhar (2002)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1131, |
|
"end": 1148, |
|
"text": "(Lee et al. 2002)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Similarity Metric", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "( s 1 , s 2 , B ) = n \u2208 semantic-relevance(s 1 ,s,B) < semantic- relevance(s 2 ,s, B)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "represents the fact that sentence s 1 is less relevant than s 2 in respect to the sentence s and the context B. In our experiments, B was taken to be the set of semantic relations given by WordNet. Clearly, the use of a different knowledge base would give different results, depending on its completeness and correctness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to calculate the semantic similarity between a sentence s 1 and another sentence s 2 , s 1 and s 2 were considered as sets P and Q of word stems. The similarity between each word in the question and each word in the answer was then calculated and the sum of the closest matches gave the overall similarity. In other words, given two sets Q and P, where Q={qw 1 ,qw 2 ,\u2026,qw n } and P={pw 1 ,pw 2 ,\u2026,pw m }, the similarity between Q and P is given by 1<p<n Argmax m similarity( qw p , pw m )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
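As a rough illustration of this measure, the sketch below scores each non stop-word of Q against its best match in P and sums the results; word_similarity is the word-level function described next, and plain lower-cased tokens stand in for the stems used in the paper:

```python
def sentence_similarity(q_sentence, p_sentence, word_similarity, stop_words):
    """Sum, over the words of Q, of the best word-level match found in P."""
    q_words = [w.lower() for w in q_sentence.split() if w.lower() not in stop_words]
    p_words = [w.lower() for w in p_sentence.split() if w.lower() not in stop_words]
    total = 0.0
    for qw in q_words:
        # argmax over P: keep only the single closest match for this question word
        total += max((word_similarity(qw, pw) for pw in p_words), default=0.0)
    return total
```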
|
{ |
|
"text": "The function similarity( w 1 , w 2 ) maps the stems of the two words w 1 and w 2 to a similarity measure m representing how semantically related the two words are; similarity( w i , w j )< similarity( w i , w k ) represents the fact that the word w j is less semantically related than w k in respect to the word w i . In particular similarity=0 if two words are not at all semantically related and similarity=1 if the words are the same.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "similarity( w 1 , w 2 ) = h \u2208", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where 0 \u2264 h \u2264 1. In particular, similarity( w 1 , w 2 ) = 0 if w 1 \u2208ST \u2228 w 2 \u2208ST, where ST is a set containing a number of stop-words (e.g. \"the\", \"a\", \"to\") which are too common to be able to be usefully employed to estimate semantic similarity. In all other cases, h is calculated as follows: the words w 1 and w 2 are compared using all the available WordNet relationships (is-a, satellite, similar, pertains, meronym, entails, etc.), with the additional relationship, \"same-as\", which indicated that two words were identical. Each relationship is given a weighting indicating how related two words are, with a \"same as\" relationship indicating the closest relationship, followed by synonym relationships, hypernym, hyponym, then satellite, meronym, pertains, entails.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
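The word-level function could be sketched as follows with the WordNet interface in NLTK. The paper does not give the relation weights, so the numbers below are purely illustrative (only their ordering follows the description: same-as above synonymy, then hypernym/hyponym, then the weaker relations; the pertains relation is omitted for brevity), and ST is a small placeholder stop-word list:

```python
from nltk.corpus import wordnet as wn  # assumes the WordNet corpus data is installed

ST = {"the", "a", "to", "of", "in", "and", "is", "who", "did"}   # placeholder stop-words

# Hypothetical weights: the ordering follows the paper, the exact values do not.
WEIGHTS = {"same_as": 1.0, "synonym": 0.8, "hypernym": 0.6, "hyponym": 0.6,
           "satellite": 0.4, "meronym": 0.4, "entails": 0.4}

def word_similarity(w1, w2):
    if w1 in ST or w2 in ST:
        return 0.0
    if w1 == w2:
        return WEIGHTS["same_as"]                      # "same-as" relationship
    s1, s2 = set(wn.synsets(w1)), set(wn.synsets(w2))
    if not s1 or not s2:
        return 0.0
    if s1 & s2:                                        # the two words share a synset
        return WEIGHTS["synonym"]
    best = 0.0
    for syn in s1:
        if set(syn.hypernyms()) & s2:
            best = max(best, WEIGHTS["hypernym"])
        if set(syn.hyponyms()) & s2:
            best = max(best, WEIGHTS["hyponym"])
        if set(syn.similar_tos()) & s2:                # satellite / similar-to
            best = max(best, WEIGHTS["satellite"])
        if (set(syn.part_meronyms()) | set(syn.member_meronyms())) & s2:
            best = max(best, WEIGHTS["meronym"])
        if set(syn.entailments()) & s2:
            best = max(best, WEIGHTS["entails"])
    return best
```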
|
{ |
|
"text": "So, for example, given the question \"Who went to the mountains yesterday?\" and the second question \"Did Fred walk to the big mountain and then to mount Pleasant?\", Q would be the set {who, go, to, the, mountain, yesterday} and P would be the set {Did, Fred, walk, to, the, big, mountain, and, then, to, mount, Pleasant}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to calculate similarity the algorithm would consider each word in turn. \"Who\" would be ignored as it is a common word and hence part of the list of stopwords. \"Go\" would be related to \"walk\" in a is-a relationship and receive a score h 1 . \"To\" and \"the\" would be found in the list of stop-words and ignored. \"Mountain\" would be considered most similar to \"mountain\" (same-as relationship) and receive a score h 2 : \"mount\" would be in a synonym relationship with \"mountain\" and give a lower score, so it is ignored. \"Yesterday\" would receive a score of 0 as there are no semantically related words in Q. The similarity measure of Q in respect to P would therefore be given by h 1 + h 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
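For instance, the two sentences of this worked example could be run through the sketches above as follows; since tokens rather than stems are used, the exact score will differ from the paper's h 1 + h 2, but the call shape is the point:

```python
# Tiny stop-word list chosen just for this example.
stop_words = {"who", "did", "to", "the", "and", "then"}
score = sentence_similarity(
    "Who went to the mountains yesterday ?",
    "Did Fred walk to the big mountain and then to mount Pleasant ?",
    word_similarity,
    stop_words,
)
print(score)   # roughly: a "went"/"walk" contribution plus a "mountains"/"mountain" one
```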
|
{ |
|
"text": "In order to improve performance of the similarity measure, additional information was considered in addition to simple word matching (see De Boni and Manandhar 2003 for a complete discussion):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Compound noun information. The motivation behind is similar to the reason for using chunking information, i.e. the fact that the word \"United\" in \"United States\" should not be considered similar to \"United\" as in \"Manchester United\". As opposed to when using chunking information, however, when using noun compound information, the compound is considered a single word, as opposed to a group of words: chunking and compound noun information may therefore be combined as in \"[the [United States] official team]\". \u2022 Proper noun information. The intuition behind this is that titles (of books, films, etc.) should not be confused with the \"normal\" use of the same words: \"blue lagoon\" as in the sentence \"the film Blue Lagoon was rather strange\" should not be considered as similar to the same words in the sentence \"they swan in the blue lagoon\" as they are to the sentence \"I enjoyed Blue Lagoon when I was younger\". \u2022 Word frequency information. This is a step beyond the use of stop-words, following the intuition that the more a word is common the less it is useful in determining similarity between sentence. So, given the sentences \"metatheoretical reasoning is common in philosophy\" and \"metatheoretical arguments are common in philosophy\", the word \"metatheoretical\" should be considered more important in determining relevance than the words \"common\", \"philosophy\" and \"is\" as it is much more rare and therefore less probably found in irrelevant sentences. Word frequency data was taken from the Given that the questions examined were generic queries which did not necessarily refer to a specific set of documents, the word frequency for individual words was taken to be the word frequency given in the British National Corpus (see BNCFreq 2003). The top 100 words, making up 43% of the English Language, were then used as stop-words and were not used in calculating semantic similarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "semantic-relevance", |
|
"sec_num": null |
|
}, |
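A possible way to derive such a stop-word list is sketched below; "bnc_freq.txt" is a hypothetical local copy of the BNC frequency list, assumed to contain one word per line in descending frequency order (the actual file format may differ):

```python
def load_stop_words(path="bnc_freq.txt", top_n=100):
    """Treat the top_n most frequent words of the frequency list as stop-words."""
    stop_words = set()
    with open(path, encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue
            stop_words.add(line.split()[0].lower())   # first field assumed to be the word
            if len(stop_words) >= top_n:
                break
    return stop_words
```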
|
{ |
|
"text": "An implementation of the algorithm was evaluated on the TREC context questions used to develop the algorithm and then on the collection of 500 new clarification dialogue questions. The results on the TREC data, which was used to develop the algorithm, were as follows (see below for discussion and an explanation of each method): Where \"New\" indicates the ability to recognize whether the current question is the first in a new series of clarification questions and \"Clarif.\" (for \"Clarification\") indicates the ability to recognize whether the current question is a clarification question.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results for the same experiments conducted on the collected data were as follows: Method 0. This method did not use any linguistic information and simply took a question to be a clarification question if it had any words in common with the previous n questions, else took the question to be the beginning of a new series. 64% of questions in the new collection could be recognized with this simple algorithm, which did not misclassify any \"new\" questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
|
{ |
|
"text": "Method 1. This method employed point 1 of the algorithm described in section 5: 62% of questions in the new collection could be recognized as clarification questions simply by looking for \"reference\" keywords such as he, she, this, so, etc. which clearly referred to previous questions. Interestingly this did not misclassify any \"new\" questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Method 2. This method employed points 1 and 2 of the algorithm described in section 5: 5% of questions in the new collection could be recognized simply by looking for the absence of verbs, which, combined with keyword lookup (Method 1), improved performance to 66%. Again this did not misclassify any \"new\" questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Method 3a. This method employed the full algorithm described in section 5 (point 3 is the similarity measure algorithm described in section 6): clarification recognition rose to 91% of the new collection by looking at the similarity between nouns in the current question and nouns in the previous questions, in addition to reference words and the absence of verbs. Misclassification was a serious problem, however with correctly classified \"new\" questions falling to 67%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Method 3b. This was the same as method 3a, but specified a similarity threshold when employing the similarity measure described in section 6: this required the nouns in the current question to be similar to nouns in the previous question beyond a specified similarity threshold. This brought clarification question recognition down to 89% of the new collection, but misclassification of \"new\" questions was reduced significantly, with \"new\" questions being correctly classified 83% of the time. Problems noted were: \u2022 False positives: questions following a similar but unrelated question series. E.g. \"Are they all Muslim countries?\" (talking about religion, but in the context of a general conversation about Saudi Arabia) followed by \"What is the chief religion in Peru?\" (also about religion, but in a totally unrelated context). \u2022 Questions referring to answers, not previous questions (e.g. clarifying the meaning of a word contained in the answer, or building upon a concept defined in the answer: e.g. \"What did Antonio Carlos Tobim play?\" following \"Which famous musicians did he play with?\" in the context of a series of questions about Fank Sinatra: Antonio Carlos Tobim was referred to in the answer to the previous question, and nowhere else in the exchange. These made up 3% of the missed clarifications. \u2022 Absence of relationships in WordNet, e.g. between \"NASDAQ\" and \"index\" (as in share index). Absence of verb-noun relationships in WordNet, e.g. between to die and death, between \"battle\" and \"win\" (i.e. after a battle one side generally wins and another side loses), \"airport\" and \"visit\" (i.e. people who are visiting another country use an airport to get there)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As can be seen from the tables above, the same experiments conducted on the TREC context questions yielded worse results; it was difficult to say, however, whether this was due to the small size of the TREC data or the nature of the data itself, which perhaps did not fully reflect \"real\" dialogues.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As regards the recognition of question in a series (the recognition that a clarification I taking place), the number of sentences recognized by keyword alone was smaller in the TREC data (53% compared to 62%), while the number of questions not containing verbs was roughly similar (about 6%). The improvement given by computing noun similarity between successive questions gave worse results on the TREC data: using method 3a resulted in an improvement to the overall correctness of 19 percentage points, or a 32% increase (compared to an improvement of 25 percentage points, or a 38% increase on the collected data); using method 3b resulted in an improvement of 13 percentage points, or a 22% increase (compared to an improvement of 23 percentage points or a 35% increase on the collected data), perhaps indicating that in \"real\" conversation speakers tend to use simpler semantic relationships than what was observed in the TREC data. Recognition", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Recognizing that a clarification dialogue is occurring only makes sense if this information can then be used to improve answer retrieval performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We therefore hypothesized that noting that a questioner is trying to clarify previously asked questions is important in order to determine the context in which an answer is to be sought: in other words, the answers to certain questions are constrained by the context in which they have been uttered. The question \"What does attenuate mean?\", for example, may require a generic answer outlining all the possible meanings of \"attenuate\" if asked in isolation, or a particular meaning if asked after the word has been seen in an answer (i.e. in a definite context which constrains its meaning). In other cases, questions do not make sense at all out of a context. For example, no answer could be given to the question \"where?\" asked on its own, while following a question such as \"Does Sean have a house anywhere apart from Scotland?\" it becomes an easily intelligible query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The usual way in which Question Answering systems constrain possible answers is by restricting the number of documents in which an answer is sought by filtering the total number of available documents through the use of an information retrieval engine. The information retrieval engine selects a subset of the available documents based on a number of keywords derived from the question at hand. In the simplest case, it is necessary to note that some words in the current question refer to words in previous questions or answers and hence use these other words when formulating the IR query. For example, the question \"Is he married?\" cannot be used as is in order to select documents, as the only word passed to the IR engine would be \"married\" (possibly the root version \"marry\") which would return too many documents to be of any use. Noting that the \"he\" refers to a previously mentioned person (e.g. \"Sean Connery\") would enable the answerer to seek an answer in a smaller number of documents. Moreover, given that the current question is asked in the context of a previous question, the documents retrieved for the previous related question could provide a context in which to initially seek an answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
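As a toy illustration of this query reformulation step (the coreference resolution itself is outside the scope of the paper), a resolved referent might simply be substituted for the pronoun before the query is sent to the IR engine; the function name and pronoun list below are assumptions made only for the sketch:

```python
import re

def reformulate_query(question, referent):
    """Replace the first third-person pronoun with the resolved referent.

    reformulate_query("Is he married?", "Sean Connery") -> "Is Sean Connery married?"
    """
    return re.sub(r"\b(he|she|it|they|him|her|them)\b", referent, question,
                  count=1, flags=re.IGNORECASE)
```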
|
{ |
|
"text": "In order to verify the usefulness of constraining the set of documents from in which to seek an answer, a subset made of 15 clarification dialogues (about 100 questions) from the given question data was analyzed by taking the initial question for a series, submitting it to the Google Internet Search Engine and then manually checking to see how many of the questions in the series could be answered simply by using the first 20 documents retrieved for the first question in a series.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results are summarized in the following diagram ( Fig. 1) : \u2022 69% of clarification questions could be answered by looking within the documents used for the previous question in the series, thus indicating the usefulness of noting the occurrence of clarification dialogue. \u2022 The remaining 31% could not be answered by making reference to the previously retrieved documents, and to find an answer a different approach had to be taken. In particular: \u2022 6% could be answered after retrieving documents simply by using the words in the question as search terms (e.g. \"What caused the boxer uprising?\"); \u2022 14% required some form of coreference resolution and could be answered only by combining the words in the question with the words to which the relative pronouns in the question referred (e.g. \"What film is he working on at the moment\", with the reference to \"he\" resolved, which gets passed to the search engine as \"What film is Sean Connery working on at the moment?\"); \u2022 7% required more than 20 documents to be retrieved by the search engine or other, more complex techniques. An example is a question such as \"Where exactly?\" which requires both an understanding of the context in which the question is asked (\"Where?\" makes no sense on its own) and the previously given answer (which was probably a place, but not restrictive enough for the questioner). \u2022 4% constituted mini-clarification dialogues within a larger clarification dialogue (a slight deviation from the main topic which was being investigated by the questioner) and could be answered by looking at the documents retrieved for the first question in the mini-series.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 61, |
|
"text": "Fig. 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Recognizing that a clarification dialogue is occurring therefore can simplify the task of retrieving an answer by specifying that an answer must be in the set of documents used the previous questions. This is consistent with the results found in the TREC context task (Voorhees 2002) , which indicated that systems were capable of finding most answers to questions in a context dialogue simply by looking at the documents retrieved for the initial question in a series. As in the case of clarification dialogue recognition, therefore, simple techniques can resolve the majority of cases; nevertheless, a full solution to the problem requires more complex methods. The last case indicates that it is not enough simply to look at the documents provided by the first question in a series in order to seek an answer: it is necessary to use the documents found for a previously asked question which is related to the current question (i.e. the questioner could \"jump\" between topics). For example, given the following series of questions starting with Q 1 : Q 1 : When was the Hellenistic Age? [\u2026] Q 5 : How did Alexander the great become ruler? Q 6 : Did he conquer anywhere else? Q 7 : What was the Greek religion in the Hellenistic Age?", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 283, |
|
"text": "(Voorhees 2002)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
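One simple way to handle such topic jumps, assuming the documents fetched for each earlier question have been kept, is to reuse the document set of whichever previous question the current question is most similar to under the metric of section 6; retrieved_docs below is a hypothetical mapping from each earlier question to its retrieved documents:

```python
def documents_for(current_question, earlier_questions, retrieved_docs,
                  sentence_similarity):
    """Pick the document set of the most semantically similar earlier question.

    sentence_similarity: two-argument callable (s1, s2) -> float, e.g. the
    metric of section 6 with its stop-word list already bound.
    """
    best_q = max(earlier_questions,
                 key=lambda q: sentence_similarity(current_question, q))
    return retrieved_docs[best_q]
```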
|
{ |
|
"text": "where Q 6 should be related to Q 5 but Q 7 should be related to Q 1 , and not Q 6 . In this case, given that the subject matter of Q 1 is more immediately related to the subject matter of Q 7 than Q 6 (although the subject matter of Q 6 is still broadly related, it is more of a specialized subtopic), the documents retrieved for Q 1 will probably be more relevant to Q 7 than the documents retrieved for Q 6 (which would probably be the same documents retrieved for Q 5 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "It has been shown that recognizing that a clarification dialogue is occurring can simplify the task of retrieving an answer by constraining the subset of documents in which an answer is to be found. An algorithm was presented to recognize the occurrence of clarification dialogue and is shown to have a good performance. The major limitation of our algorithm is the fact that it only considers series of questions, not series of answers. As noted above, it is often necessary to look at an answer to a question to determine whether the current question is a clarification question or not. Our sentence similarity algorithm was limited by the number of semantic relationships in WordNet: for example, a big improvement would come from the use of noun-verb relationships. Future work will be directed on extending WordNet in this direction and in providing other useful semantic relationships. Work also needs to be done on using information given by answers, not just questions in recognizing clarification dialogue and on coping with the cases in which clarification dialogue recognition is not enough to retrieve an answer and where other, more complex, techniques need to be used. It would also be beneficial to examine the use of a similarity function in which similarity decayed in function of the distance in time between the current question and the past questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Using dynamic user models in the recognition of the plans of the user", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ardissono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Sestero", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "User Modeling and User-Adapted Interaction", |
|
"volume": "5", |
|
"issue": "2", |
|
"pages": "157--190", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ardissono, L. and Sestero, D. 1996. \"Using dynamic user models in the recognition of the plans of the user\". User Modeling and User-Adapted Interaction, 5(2):157-190.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "English Word Frequency List", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bncfreq", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "BNCFreq. 2003. English Word Frequency List. http://www.eecs.umich.edu/~qstout/586/bncfreq.html (last accessed March 2003).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Semantic distance in WordNet: and experimental, application-oriented evaluation of five measures", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Budanitsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the NAACL 2001 Workshop on WordNet and other lexical resources", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Budanitsky, A., and Hirst, G. 2001. \"Semantic distance in WordNet: and experimental, application-oriented evaluation of five measures\", in Proceedings of the NAACL 2001 Workshop on WordNet and other lexical resources, Pittsburgh.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The Use of Sentence Similarity as a Semantic Relevance Metric for Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "De Boni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the AAAI Symposium on New Directions in Question Answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De Boni, M. and Manandhar, S. 2003. \"The Use of Sentence Similarity as a Semantic Relevance Metric for Question Answering\". Proceedings of the AAAI Symposium on New Directions in Question Answering, Stanford.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Automated Discovery of Telic Relations for WordNet", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "De Boni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the First International WordNet Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De Boni, M. and Manandhar, S. 2002. \"Automated Discovery of Telic Relations for WordNet\". Proceedings of the First International WordNet Conference, India.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "WordNet, An electronic Lexical Database", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fellbaum, C. 1998. WordNet, An electronic Lexical Database, MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Clarifying Utterances", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ginzburg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 2nd Workshop on the Formal Semantics and Pragmatics of Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ginzburg , J. 1998. \"Clarifying Utterances\" In: J. Hulstijn and A. Nijholt (eds.) Proceedings of the 2nd Workshop on the Formal Semantics and Pragmatics of Dialogue, Twente.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Interrogative Investigations, CSLI", |
|
"authors": [ |
|
{ |
|
"first": "Sag", |
|
"middle": [], |
|
"last": "Ginzburg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ginzburg and Sag, 2000. Interrogative Investigations, CSLI.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Automatically generating hypertext by computing semantic similarity", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Green", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Green, S. J. 1997. Automatically generating hypertext by computing semantic similarity, Technical Report n. 366, University of Toronto.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "WordNet2 -a morphologically and semantically enhanced resource", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Harabagiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moldovan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of SIGLEX-99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harabagiu, S., Miller, A. G., Moldovan, D. 1999. \"WordNet2 -a morphologically and semantically enhanced resource\", In Proceedings of SIGLEX-99, University of Maryland.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Answering Complex, List and Context Questions with LCC's Question-Answering Server", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Harabagiu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of TREC-10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harabagiu, S., et al. 2002. \"Answering Complex, List and Context Questions with LCC's Question- Answering Server\", Proceedings of TREC-10, NIST.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Lexical chains as representations of context for the detection and correction of malapropisms", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "St-Onge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "WordNet: and electronic lexical database", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hirst, G., and St-Onge, D. 1998. \"Lexical chains as representations of context for the detection and correction of malapropisms\", in Fellbaum (ed.), WordNet: and electronic lexical database, MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Semantic similarity based on corpus statistics and lexical taxonomy", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Conrath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of ICRCL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang, J. J., and Conrath, D. W. 1997. \"Semantic similarity based on corpus statistics and lexical taxonomy\", in Proceedings of ICRCL, Taiwan.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "SiteQ: Engineering High Performance QA System Using Lexico-Semantic Pattern Matching and Shallow NLP", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of TREC-10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, G. G., et al. 2002. \"SiteQ: Engineering High Performance QA System Using Lexico-Semantic Pattern Matching and Shallow NLP\", Proceedings of TREC-10, NIST.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "An information-theoretic definition of similarity", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 15th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin, D. 1998. \"An information-theoretic definition of similarity\", in Proceedings of the 15th International Conference on Machine Learning, Madison.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A Method for Word Sense Disambiguation of Unrestricted Text", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moldovan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of ACL '99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihalcea, R. and Moldovan, D. 1999. \"A Method for Word Sense Disambiguation of Unrestricted Text\", in Proceedings of ACL '99, Maryland, NY.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "WordNet: A Lexical Database", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miller, G. A. 1999. \"WordNet: A Lexical Database\", Communications of the ACM, 38 (11).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Logic Form Transformation of WordNet and its Applicability to Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moldovan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Rus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 39th conference of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moldovan, D. and Rus, V. 2001. \"Logic Form Transformation of WordNet and its Applicability to Question Answering\", Proceedings of the 39th conference of ACL, Toulouse.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Using information content to evaluate semantic similarity", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the 14th IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Resnik, P. 1995. \"Using information content to evaluate semantic similarity\", in Proceedings of the 14th IJCAI, Montreal.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Patterns of Potential Answer Expressions as Clues to the Right Answers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Soubbotin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of TREC-10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soubbotin, M. M. 2002. :\"Patterns of Potential Answer Expressions as Clues to the Right Answers\", Proceedings of TREC-10, NIST.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "From plan critiquing to clarification dialogue for cooperative response generation", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Van Beek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational Intelligence", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "132--154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "van Beek, P., Cohen, R. and Schmidt, K., 1993. \"From plan critiquing to clarification dialogue for cooperative response generation\", Computational Intelligence 9:132-154.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Overview of the TREC 2001 Question Answering Track", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Voorhees", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of TREC-10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Voorhees, E. 2002. \"Overview of the TREC 2001 Question Answering Track\", Proceedings of TREC- 10, NIST.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Fig. 1: Search technique used for Question", |
|
"uris": null, |
|
"type_str": "figure" |
|
} |
|
} |
|
} |
|
} |