|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:39:28.318810Z" |
|
}, |
|
"title": "FRAQUE: a FRAme-based QUEstion-answering system for the Public Administration domain", |
|
"authors": [ |
|
{ |
|
"first": "Martina", |
|
"middle": [], |
|
"last": "Miliani", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universit\u00e0 per Stranieri di Siena", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Passaro", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "CoLing Lab (Dipartimento di Filologia", |
|
"institution": "Universit\u00e0 di Pisa", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "CoLing Lab (Dipartimento di Filologia", |
|
"institution": "Universit\u00e0 di Pisa", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we propose FRAQUE, a question answering system for factoid questions in the Public Administration domain. The system is based on semantic frames, here intended as collections of slots typed with their possible values. FRAQUE is a pattern-base system that queries unstructured data, such as documents, web pages, and social media posts. Our system can exploit the potential of different approaches: it extracts pattern elements from texts which are linguistically analysed by means of statistical methods. FRAQUE allows Italian users to query vast document repositories related to the domain of Public Administration. Given the statistical nature of most of its components such as word embeddings, the system allows for a flexible domain and language adaptation process. FRAQUE's goal is to associate questions with frames stored into a Knowledge Graph along with relevant document passages, which are returned as the answer. In order to guarantee the system usability, the implementation of FRAQUE is based on a user-centered design process, which allowed us to monitor the linguistic structures employed by users, as well as to find which terms were the most common in users' questions.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we propose FRAQUE, a question answering system for factoid questions in the Public Administration domain. The system is based on semantic frames, here intended as collections of slots typed with their possible values. FRAQUE is a pattern-base system that queries unstructured data, such as documents, web pages, and social media posts. Our system can exploit the potential of different approaches: it extracts pattern elements from texts which are linguistically analysed by means of statistical methods. FRAQUE allows Italian users to query vast document repositories related to the domain of Public Administration. Given the statistical nature of most of its components such as word embeddings, the system allows for a flexible domain and language adaptation process. FRAQUE's goal is to associate questions with frames stored into a Knowledge Graph along with relevant document passages, which are returned as the answer. In order to guarantee the system usability, the implementation of FRAQUE is based on a user-centered design process, which allowed us to monitor the linguistic structures employed by users, as well as to find which terms were the most common in users' questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Although late, Italy is slowly advancing in the digitization process of Public Administration data and services (Carloni, 2019) . Now, more and more institutions in Italy manage data and delivery services on the web. Several municipalities started to adopt Question Answering Systems (QASs), chatbots, and digital assistants to ease citizens' access to public data. A wide range of citizens can use these systems since they permit to query vast repositories in natural language (Hovy et al., 2000; Ojokoh, 2018) . In this paper, we propose FRAQUE (FRAme-based QUEstion-answering), a domain-specific question answering system for factoid questions. Our system exploits semantic frames, here intended as templates consisting of a set of slots typed with their possible values (Minsky, 1974; Jurafsky and Martin, 2019) . Thanks to frames, our QAS can query unstructured data, such as documents, web pages, and social media posts. We applied FRAQUE to the administrative domain in the Italian language. Nonetheless, the system is potentially adaptable to different domains and different languages. It relies on the statistical components of CoreNLP-it (Bondielli et al., 2018) for morphosyntactic analysis, which exploits the Universal Dependencies (UD) annotation scheme (Nivre, 2015) . Statistical components are also employed for the semantic analysis of questions for Named Entity Recognition (NER) and term extraction. Finally, our system performs query expansion following an unsupervised approach based on word embeddings (Mikolov et al., 2013) . A first implementation of FRAQUE has been developed on the administrative domain. Our target users are municipality officers and common citizens who need to access the rich amount of information hidden in public documents. In particular, we decided to focus on citizens, who are supposed to use a QAS to get notice about municipality regulations and to receive other kind of information related to a certain administrative area. In order to guarantee the effec-tiveness and the usability of FRAQUE, we followed usercenter design principles introduced by Gould and Lewis (1985) . We collected questions written by Italian native speakers to assess FRAQUE's outcomes. We tested FRAQUE on the administrative domain by employing the information extracted from a set of Italian documents including administrative acts, social media posts, and official municipality web pages. In particular, FRAQUE has been embedded into a dialogue management system and has been tested as a module of a larger project involving several instruments developed for the Public Administration (PA) domain. The paper is structured as follows: An overview on QASs is given in Section 2., the definition of FRAQUE methodology is outlined in Section 3. The evaluation of the system in a real-case scenario is described in Section 4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 127, |
|
"text": "(Carloni, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 497, |
|
"text": "(Hovy et al., 2000;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 511, |
|
"text": "Ojokoh, 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 788, |
|
"text": "(Minsky, 1974;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 815, |
|
"text": "Jurafsky and Martin, 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1148, |
|
"end": 1172, |
|
"text": "(Bondielli et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1268, |
|
"end": 1281, |
|
"text": "(Nivre, 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1525, |
|
"end": 1547, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 2104, |
|
"end": 2126, |
|
"text": "Gould and Lewis (1985)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Existing QASs have been categorized in different ways, e.g. depending on the addressed question type (e.g., confirmation questions, factoid questions, list questions), on the features of consulted data bases (e.g., full relational databases, RDF databases), on the adopted approaches and techniques (Ojokoh, 2018) . According to Dwivedi and Singh (2013) and Pundge et al. (2016) QASs can be distinguished into three different categories on the basis of the adopted approach: linguistic approach (Green et al., 1961; Clark et al., 1999; Fader and Etzioni, 2013; Berant et al., 2013) , statistical approach (Moschitti, 2003; Ferrucci, 2010; Chen et al., 2017; Devlin et al., 2019) and pattern matching approach (Ravichandran and Hovy, 2002; Pa\u015fca, 2003) . QASs based on a linguistic approach exploit Natural Language Processing (NLP) and language resources such as knowledge-based or corpora. The knowledge architecture of these systems relies on production rules, logic, frames, templates, ontologies, and semantic networks (Dwivedi and Singh, 2013 ). On the one hand, the linguistic approach is very effective in specific domains. On the other hand, it shows limitations in portability through different domains, since building an appropriate knowledge base has usually heavy time costs. On the contrary, statistical approaches are easily adapted to various domains since they are independent of any language form. This kind of QASs are often based on Support Vector Machine (SVM) classifiers, Bayesian classifiers, Maximum Entropy models and Neural Networks (NN) . Such question classifiers analyze the user's question to make predictions about the expected answer type, thanks to statistical measures. Statistical QASs require an adequate amount of data to train the models, therefore in this case the development cost moves from the manual production of linguistic rules to the preparation of annotated resources to feed the classifiers. Pattern matching approaches exploit text patterns to analyze the question to select and return the right answer. For example, the question \"Where was Cricket World Cup 2012 held?\" corresponds to the pattern \"Where was <Event Name> held?\" and is associated with the answer pattern \"<Event Name> was held at <Location>\" (Dwivedi and Singh, 2013) . These systems are less complex than those exploiting linguistic features, which require time and specific human skills, and most of them automatically learn patterns from texts (Dwivedi and Singh, 2013; Hovy et al., 2000) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 313, |
|
"text": "(Ojokoh, 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 353, |
|
"text": "Dwivedi and Singh (2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 378, |
|
"text": "Pundge et al. (2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 515, |
|
"text": "(Green et al., 1961;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 535, |
|
"text": "Clark et al., 1999;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 560, |
|
"text": "Fader and Etzioni, 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 581, |
|
"text": "Berant et al., 2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 622, |
|
"text": "(Moschitti, 2003;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 638, |
|
"text": "Ferrucci, 2010;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 657, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 678, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 738, |
|
"text": "(Ravichandran and Hovy, 2002;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 751, |
|
"text": "Pa\u015fca, 2003)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1023, |
|
"end": 1047, |
|
"text": "(Dwivedi and Singh, 2013", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1559, |
|
"end": 1563, |
|
"text": "(NN)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2259, |
|
"end": 2284, |
|
"text": "(Dwivedi and Singh, 2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 2464, |
|
"end": 2489, |
|
"text": "(Dwivedi and Singh, 2013;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 2490, |
|
"end": 2508, |
|
"text": "Hovy et al., 2000)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Furthermore, as reported by Jurafsky and Martin (2019) , there are two different major paradigms of QASs: information-retrieval based and knowledge-based. In the former case, systems leverage on a vast quantity of textual information, which is retrieved and returned thanks to text analysis methods (Brill et al., 2002; Pa\u015fca, 2003; Lin, 2007; Fader and Etzioni, 2013; Chen et al., 2017; Devlin et al., 2019) . In the latter case, semantic data are already structured into knowledge bases (Green et al., 1961; Clark et al., 1999; Ravichandran and Hovy, 2002; Fader and Etzioni, 2013; Berant et al., 2013) . Finally, hybrid systems, like IBM Watson DeepQA (Ferrucci, 2010) , rely both on text datasets and structured knowledge bases to answer questions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 54, |
|
"text": "Jurafsky and Martin (2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 319, |
|
"text": "(Brill et al., 2002;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 332, |
|
"text": "Pa\u015fca, 2003;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 343, |
|
"text": "Lin, 2007;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 368, |
|
"text": "Fader and Etzioni, 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 387, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 408, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 509, |
|
"text": "(Green et al., 1961;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 529, |
|
"text": "Clark et al., 1999;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 558, |
|
"text": "Ravichandran and Hovy, 2002;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 583, |
|
"text": "Fader and Etzioni, 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 604, |
|
"text": "Berant et al., 2013)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 671, |
|
"text": "(Ferrucci, 2010)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Following such a classification, FRAQUE can be seen as an hybrid approach system. Firstly, it is based on linguistic analysis through statistical methods, which serves as prerequisite to maximize the performance of pattern matching techniques application. Secondly, it draws its data from a thesaurus and a Knowledge Graph (KG) both structured into semantic frames. In the thesaurus, simple terms, complex terms, and named entities related to the same frame are clustered and arranged into patterns exploited for the question analysis. In the KG, each slot frame contains a text passage (i.e., a single sentence snippet), selected through a ranking process measuring its relevance for that frame slot. Differently from relational databases, a pre-defined set of relations is not required by a KG, so that a more flexible object-oriented data storage is guaranteed (Miliani et al., 2019) . Moreover, FRAQUE applies statistical techniques to identify and cluster data, such as word embeddings and classifiers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 864, |
|
"end": 886, |
|
"text": "(Miliani et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "In this section we present an overview of the user-centered design process employed to create FRAQUE. Moreover, we report on its components through the three main stages described in Dwivedi and Singh (2013) , namely document analysis, question analysis and answer analysis. Figure 1 : The diagram shows the FRAQUE analysis pipeline, which shares some modules with the Text Frame Detector (TFD) system (Miliani et al., 2019) . Components in the central box belong to both FRAQUE and TFD systems. Except for the answer analysis component, all the other FRAQUE modules are employed in the question analysis described in Section 3.3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 207, |
|
"text": "Dwivedi and Singh (2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 424, |
|
"text": "(Miliani et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 283, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The FRAQUE Methodology", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "We decided to adopt a user-centered design process (Gould and Lewis, 1985) to consider users' needs as a fundamental requirement for FRAQUE implementation. We distributed a questionnaire to 30 users divided into four age groups: 18 \u2212 25 (15%); 26 \u2212 35 (33.3%); 36 \u2212 50 (20%); 51 \u2212 65 (30%). We asked the users to write a small number of questions, pretending to interact with a QAS. The questionnaire allowed us to monitor the linguistic structures employed by users, as well as to find which terms were the most common in users' questions so that it was easier to identify frame triggers and attribute triggers. (see Section 3.2.). Further linguistic features detected by analyzing users' questions were: (i) lack of punctuation; (ii) variable length of questions: from 1 to 15 tokens (the shorter ones contained only keywords, as if the users were querying a search engine); (iii) typos. Considering (i) and (ii), we opted for avoiding fixed pattern for question analysis: we decided to look for patterns of unordered elements on the question text, without sticking to fixed term sequences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 74, |
|
"text": "(Gould and Lewis, 1985)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User-Centered Design Process", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "Document analysis consists of identifying candidate documents and detecting possible answers among document snippets (Dwivedi and Singh, 2013) . The knowledge base employed by our system is a KG populated by the Text Frame Detector (TFD), an Information Extraction (IE) system described by Miliani et al. (2019) (see Figure 1 ), containing semantic frames selected through the design process described in Section 3.1. (see Figure 2 ). ", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 142, |
|
"text": "(Dwivedi and Singh, 2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 311, |
|
"text": "Miliani et al. (2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 325, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 431, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document Analysis", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "As anticipated, FRAQUE and TFD have been embedded into a dialogue management system as the QAS of a chatbot. The systems are part of a bigger project that involves several instruments aimed at analyzing and indexing documents belonging to the PA domain. In particular, FRAQUE and TFD work downstream of a complex indexing process composed of both general purpose and domain specific components. First of all, TFD exploits two different linguistic pipelines: T2K 2 (Dell'Orletta et al., 2014) and CoreNLP-it (Bondielli et al., 2018) . The former has been adapted for administrative acts analysis, the latter for the annotation of questions and texts like social media posts, since it includes statistical models for tokenization, sentence splitting, Part-of-Speech (PoS) tagging, and parsing. For event detection, our QAS exploits a model embedded in the broader system where it has been integrated. To extract NEs, the Stanford NER (Manning et al., 2014) is employed. In particular, it exploits the INFORMed PA model to extract entities related to the administrative domain. Furthermore, it employs EXTra (Passaro and Lenci, 2016) for in-domain complex terms extraction. Table 1 shows the performances of the components used for the morphosyntactic and semantic analysis of texts. As anticipated, T2K 2 has been employed to analyze administrative acts, but to our knowledge its performances have not been assessed on the PA domain yet. We report an evaluation performed over general-purpose documents (Dell'Orletta, 2009) . Nevertheless, it is worth mentioning that morphosyntactic annotation underlying INFORMed PA, and EXTra was carried out with the adapted version of T2k 2 to the PA domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 507, |
|
"end": 531, |
|
"text": "(Bondielli et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 932, |
|
"end": 954, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1105, |
|
"end": 1130, |
|
"text": "(Passaro and Lenci, 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1501, |
|
"end": 1521, |
|
"text": "(Dell'Orletta, 2009)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1171, |
|
"end": 1178, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Linguistic Analysis and preparatory IE process", |
|
"sec_num": "3.2.1." |
|
}, |
|
{ |
|
"text": "PA MEASURE SCORE ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "COMPONENT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In FRAQUE, each frame F encodes semantic categories relevant for a specific domain, such as the TAX frame for the administrative domain. \"Municipality Tax\" or \"Garbage Tax\" are linguistic cues called frame triggers (F t ) and enable the detection of frame instances on texts. Deadline and methods of payment are considered attributes (A). Attributes encode the relevant features of the semantic category represented by each frame. Attribute triggers (T ) ease the attribute extraction from texts. T and F t are both expressed by simple and complex terms, Named Entities (NEs), and Temporal Expressions (TEs). For instance, the deadline attribute is detected by the triggers \"disbursement\", \"installment\", and usually by date (see Figure 3 ). For ease of reading, the examples provided along the paper have been translated in English. Triggers are stored in an thesaurus and linked to the related frames and attributes. They are registered with their standard form s and a small number of orthographic and morphosyntactic variants v selected by domain experts. Trigger variants are expanded with their semantic neighbors to improve frame and attribute recall. In Figure 3 , the attribute triggers \"wire transfer\" and \"postal order\" are tagged with their standard form \"payment-form\". After the linguistic analysis, we applied TFD to search frame and attribute triggers on the text, in the same or adjacent sentences. The snippet in Figure 3 shows the trigger for the TAX frame \"Municipality Tax\" along with several attribute triggers: simple terms, such as \"disbursement\" and \"installment\"; complex terms, like \"wire transfer\" and \"postal order\"; and TEs, i.e. \"June 18 th \" and \"December 17 th \". The extracted sentences are ranked according to different scores, taking into account metrics like the number of retrieved triggers related to a given attribute, the average distance (in tokens) between the frame and the attribute triggers, the sentence length. Consider the snippet in Figure 3 concerning the attribute methods of payment: there are three retrieved triggers (\"disbursement\", \"wire transfer\" and \"postal order\"); the average token distance between the frame trigger \"Municipality Tax\" and these triggers is (0 + 5 + 7)/3 = 4 (e.g., \"wire transfer\" is five tokens distant from \"Municipality Tax\"); finally, the sentence length is 22 tokens. The sentence with the highest rank is linked to the related attribute. More specifically, each candidate snippet receives a double score, a Sentence Score (SS), which ranks it within the set of snippets extracted from the same document, and a Document Score (DS) ranking it within the set of snippets extracted from the entire collection of documents (Miliani et al., 2019) . Frame instances are stored in a Neo4j 1 KG. As shown in Figure 2 , each frame corresponds to a root node, which is represented by the TAX frame in the proposed example in Figure 4 . Each frame node is connected with all the frame triggers found on the collection of documents. If we consider the snippet in Figure 3 , the instance of the frame is given by the trigger \"Municipality Tax\", which labels the frame trigger node connected to \"Tax\" in Figure 4 . Frame trigger nodes are linked to attribute nodes. For instance, the snippet in Figure 3 contains information about the attribute deadline. 
This attribute node is connected to at least a document node, representing the document where the attribute has been extracted from: we took as example a \"Rome Municipality Act\". A snippet with the higher SS for the connected attribute is stored together with the document node. The snippet is also registered with its DS. One of the triggers extracted from the snippet in Figure 3 is \"June 18 th \", which labels the trigger variant node: this node is connected on one side to a trigger node marked by its standard form, i.e. \"date\", and on the other side to the snippet node representing the snippet containing the trigger.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2704, |
|
"end": 2726, |
|
"text": "(Miliani et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 730, |
|
"end": 738, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1162, |
|
"end": 1170, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1431, |
|
"end": 1439, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1983, |
|
"end": 1989, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2785, |
|
"end": 2793, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 2900, |
|
"end": 2908, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 3036, |
|
"end": 3044, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 3175, |
|
"end": 3183, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 3266, |
|
"end": 3274, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 3699, |
|
"end": 3707, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Detecting Frames", |
|
"sec_num": "3.2.2." |
|
}, |
|
{ |
|
"text": "Question analysis includes parsing, question classification, and query reformulation (Dwivedi and Singh, 2013) . The main goal of the question analysis module is to find a match between a question and at least a frame attribute indexed into the KG. The analysis is carried out exploiting some components shared with the TFD for the linguistic annotation and the frame extraction (See Fig. 1 ), a focus detection (Cooper and Ruger, 2000) and a question evaluation process, aiming at associating each question to the right frame and attribute and formulate the query to the KB. With the same goal, a query expansion module exploits word embeddings to find triggers among the semantic neighbours of questions ngrams (see Figure 1 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 110, |
|
"text": "(Dwivedi and Singh, 2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 436, |
|
"text": "(Cooper and Ruger, 2000)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 390, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 726, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "1 http://neo4j.com/ Figure 4 : The Knowledge Graph populated by the TFD with an instance of the attribute deadline, belonging to the TAX frame.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 28, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "The morphosyntactic analysis of questions is carried out by the CoreNLP-it pipeline, whereas rule-based components are exploited for NER. GATE 2 and the Stanford To-kensRegex (Chang and Manning, 2014) are used to extract from questions the entities annotated with statistical components during the document analysis phase (See 3.2.1.). Given a set of frame attributes A, an attribute a \u2208 A is identified in a question by the co-occurrence of a frame trigger F t and a subset of the attribute triggers set T associated with it, such as A = {F t , T }, where T = {t 1 , ..., t n }. Triggers are grouped by several standard forms {s 1 , ..., s n }, such as S = {s 1 , ..., s n } (see Section 3.2.). Moreover, a subset Q of T is implicitly expressed on text by means of question foci. Thus, Q \u2282 T and S \u2282 T . The TFD module employed by FRAQUE for attribute extraction looks for a frame trigger F t to possibly associate the question with a frame F . For instance, in this phase the frame trigger for the TAX frame \"Municipality Tax\" is extracted from the question in Figure 5 . Then, the TFD searches for attribute triggers related to the TAX frame attributes. Different degrees of flexibility can be set for the attribute retrieving. A binary feature assigned to each trigger t i suggests if the trigger is compulsory for associating an attribute with the examined question (Miliani et al., 2019) . In the example in Figure 5 , the attribute extraction module detects only the generic trigger \"payment\", which led to associate the question with both the attributes deadline and methods of payment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 200, |
|
"text": "(Chang and Manning, 2014)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1371, |
|
"end": 1393, |
|
"text": "(Miliani et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1063, |
|
"end": 1071, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1414, |
|
"end": 1422, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
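As an illustration of the attribute extraction step just described, the sketch below assumes a toy thesaurus (the dictionary layout and trigger lists are invented for the example; the real TFD thesaurus is richer) and marks an attribute as a candidate when a frame trigger co-occurs with at least one of its attribute triggers and every compulsory trigger is present.

```python
# Illustrative sketch of the attribute extraction step (hypothetical data
# structures; the real TFD matching is richer and works on annotated text).

TAX_THESAURUS = {
    "frame_triggers": {"municipality tax", "garbage tax"},
    "attributes": {
        # trigger -> compulsory flag (True means the trigger must appear)
        "deadline": {"payment": False, "installment": False, "date": False},
        "methods of payment": {"payment": False, "wire transfer": False},
    },
}

def candidate_attributes(question: str, thesaurus=TAX_THESAURUS):
    text = question.lower()
    if not any(ft in text for ft in thesaurus["frame_triggers"]):
        return []  # no frame trigger: the frame itself cannot be activated
    candidates = []
    for attribute, triggers in thesaurus["attributes"].items():
        found = {t for t in triggers if t in text}
        compulsory_ok = all(t in found for t, must in triggers.items() if must)
        if found and compulsory_ok:
            candidates.append(attribute)
    return candidates

# The generic trigger "payment" activates both attributes, as in Figure 5.
print(candidate_attributes("When is the payment of the Municipality Tax due?"))
# -> ['deadline', 'methods of payment']
```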
|
{ |
|
"text": "When the [payment] payment of the [Municipality Tax] tax is due? Figure 5 : Example of a user question containing the question focus (\"when\"). The tagged tokens are attribute triggers and tags correspond to their standard forms.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 73, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "If no attribute is activated, a query expansion module checks if simple and complex terms extracted from questions are at least semantic neighbors of the triggers con-tained in the thesaurus. Semantic neighbors are computed within a distributional space trained with word2vec (Mikolov et al., 2013) on La Repubblica corpus ( Baroni and Mazzoleni, 2004) and PaWaC for administrative domain-specific knowledge. FRAQUE searches for the terms extracted from the question among the distributional space targets. Target words are lemmatized and combined for complex terms. Following the compositional property of word embeddings, each complex term vector consists of an element-wise sum of its word embedding elements (Hazem and Daille, 2018) . Semantic neighbors are then detected among the terms with the highest cosine similarity measure. Among these neighbors, FRAQUE searches for triggers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 298, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 352, |
|
"text": "Baroni and Mazzoleni, 2004)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 736, |
|
"text": "(Hazem and Daille, 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
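The query expansion step can be sketched as follows, assuming the word2vec space is available as a simple {lemma: vector} dictionary (the toy vectors and helper names below are illustrative only, not the actual distributional spaces): complex terms are composed by element-wise summation, and thesaurus triggers are looked up among the nearest cosine neighbours.

```python
import numpy as np

# Hypothetical toy vectors standing in for the word2vec spaces trained on
# La Repubblica and PaWaC.
EMBEDDINGS = {
    "tax": np.array([0.9, 0.1, 0.0]),
    "fee": np.array([0.8, 0.2, 0.1]),
    "event": np.array([0.0, 0.9, 0.3]),
    "municipality": np.array([0.2, 0.1, 0.9]),
}

def term_vector(term: str) -> np.ndarray:
    # element-wise sum of the word vectors of the (lemmatized) components
    return np.sum([EMBEDDINGS[w] for w in term.split() if w in EMBEDDINGS],
                  axis=0)

def cosine(u: np.ndarray, v: np.ndarray) -> float:
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

def nearest_triggers(term: str, triggers, top_k: int = 3):
    vec = term_vector(term)
    scored = [(t, cosine(vec, term_vector(t))) for t in triggers]
    return sorted(scored, key=lambda x: x[1], reverse=True)[:top_k]

# e.g., expanding the complex term "municipality fee" towards known triggers
print(nearest_triggers("municipality fee", ["tax", "event"]))
```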
|
{ |
|
"text": "To solve the potential ambiguity resulting from the attribute extraction process and to facilitate a connection between questions and attributes, we implemented a focus detection module. The question focus is expressed by interrogative adverbs, like \"how\", and by equivalent linguistic expressions composed by more than one token, such as \"in which way\". Each focus is associated with an attribute trigger. For instance \"how\" is linked to the trigger \"methods\", whereas the focus \"where\" is related to a trigger represented by a location named entity. The extracted focus is then involved in the question evaluation process. In Figure 5 , the question focus is \"when\", which is associated with TEs. Thus, the snippet containing the answer of the cited question must include a TE. The attribute including a date among its trigger is the attribute deadline, which is therefore associated with the question. If the focus extracted from the question is not connected to any frame attribute, or if no focus has been extracted from the question (as showed in Figure 6 ), a different procedure is followed. In this case, the attribute selected is the one with the highest Attribute Score (AS). The AS is computed for each candidate attribute selected by the attribute extraction module, and it is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 628, |
|
"end": 636, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1053, |
|
"end": 1061, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "AS = |SQ| |ST | \u00d7 n i=1 cos |TQ| (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "where S Q is the set of the standard forms of all the triggers T Q extracted from the question and related to a certain attribute, such as S Q \u2282 S and T Q \u2282 T . AS is directly proportional to the average of the cosine similarity between the triggers in T Q and the triggers stored in the thesaurus. In this way, AS favors terms semantically closer to the triggers contained in the thesaurus, so that the noise resulting from query expansion process is reduced. Furthermore, AS does not consider only T Q , the set of all triggers found on the question. AS takes into consideration the ratio between trigger standard forms in S Q and T Q , because it better expresses the variety of triggers by which an attribute is described on the text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Analysis", |
|
"sec_num": "3.3." |
|
}, |
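Under our reading of Equation (1) (the exact denominator of the first ratio is not fully spelled out in the text, so this is a hedged sketch rather than the reference implementation, and all names are hypothetical), the Attribute Score can be computed as follows:

```python
# Sketch of the Attribute Score (AS): the ratio between the distinct standard
# forms found in the question (S_Q) and the standard forms associated with
# the attribute (assumed here to be S_T), multiplied by the average cosine
# similarity between the triggers found in the question (T_Q) and the
# thesaurus triggers.

def attribute_score(found_triggers, attribute_standard_forms):
    """found_triggers: list of (trigger, standard_form, cosine_similarity)
    tuples extracted from the question for one candidate attribute."""
    if not found_triggers:
        return 0.0
    s_q = {std for _, std, _ in found_triggers}        # standard forms in the question
    t_q = [cos for _, _, cos in found_triggers]        # one cosine per found trigger
    ratio = len(s_q) / len(attribute_standard_forms)   # |S_Q| / |S_T|
    return ratio * (sum(t_q) / len(t_q))               # times the average cosine

# e.g., two triggers sharing the standard form "payment-form", out of three
# standard forms known for the attribute "methods of payment":
print(attribute_score(
    [("wire transfer", "payment-form", 0.9), ("postal order", "payment-form", 0.8)],
    attribute_standard_forms={"payment-form", "payment", "installment"},
))  # -> (1/3) * 0.85 = 0.283...
```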
|
{ |
|
"text": "Finally, the extraction and ranking of candidate answers are carried out in the answer analysis (Dwivedi and Singh, 2013 ) (see Figure 1 ). The answer returned by FRAQUE is a snippet that is detected walking through the KG nodes, following a path indicated by the information extracted from the question during the question analysis phase. Once the question is analysed we identify three different scenarios:", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 120, |
|
"text": "(Dwivedi and Singh, 2013", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 136, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 The attribute scenario: the question is associated with an attribute;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 The frame scenario: the question is linked to a frame, can be specified;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "\u2022 The residual scenario: the question cannot be related to any attribute or frame.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "In the first scenario, FRAQUE uses the question analysis results to query the KG and retrieve a snippet. Consider the question in Figure 5 , which is related to the attribute deadline of the TAX frame, and which contains the frame trigger \"Municipality Tax\". FRAQUE looks at the root nodes inside the graph and selects the one labelled by \"Tax\". Then, it looks for \"Municipality Tax\" among the frame instances and checks for the presence of an attribute node tagged with \"deadline\" afterwards. At this point, if the requested information has to be extracted from the whole corpus, FRAQUE considers the snippets stored with each document node and returns the one with the highest DS.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 138, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
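In the attribute scenario, the walk through the Neo4j KG could look like the following sketch; the node labels, relationship types, and property names are assumptions made for illustration, since the exact graph schema is not published here.

```python
# Illustrative query for the attribute scenario: walk from the frame root
# node to the frame trigger, to the attribute, and return the snippet with
# the highest Document Score (DS). Schema names are hypothetical.

from neo4j import GraphDatabase  # official Neo4j Python driver

QUERY = """
MATCH (f:Frame {name: $frame})<-[:INSTANCE_OF]-(ft:FrameTrigger {label: $trigger})
      -[:HAS_ATTRIBUTE]->(a:Attribute {name: $attribute})
      -[:EXTRACTED_FROM]->(d:Document)-[:HAS_SNIPPET]->(s:Snippet)
RETURN s.text AS snippet, d.name AS document
ORDER BY s.ds DESC
LIMIT 1
"""

def best_snippet(driver, frame, trigger, attribute):
    with driver.session() as session:
        record = session.run(QUERY, frame=frame, trigger=trigger,
                             attribute=attribute).single()
        return (record["snippet"], record["document"]) if record else None

# Usage (connection details are placeholders):
# driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "pw"))
# best_snippet(driver, "Tax", "Municipality Tax", "deadline")
```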
|
{ |
|
"text": "Otherwise, if the information has to be searched in a specific document (e.g., \"Rome Municipality Act\"), FRAQUE searches that document among those connected to the considered attribute, and returns the snippet associated with it. In the frame scenario, only a frame trigger has been extracted from the question, but no focus or attribute trigger can disambiguate the user's information request. In this case, FRAQUE returns the document or the set of documents connected to the highest number of attribute nodes for the detected frame. Such documents are in fact supposed to contain a more complete knowledge about the frame itself. In the residual scenario, triggers can not be detected neither among the question terms, nor among their semantic neighbours. In that case, FRAQUE extracts all metadata from the question, such as complex terms and entities, and uses them to query a document base indexed on Lucene 3 . In this database, the documents are indexed with terms, entities and topics related to the administrative domain. Terms and topics are structured in an ontology built by domain experts and employed for the platform SemplicePA (Miliani et al., 2017) . FRAQUE returns those documents where the extracted terms and entities co-occur, by exploiting AND queries based on a list of pre-defined groups of metadata organized by type (i.e., terms, entities, and topic).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1144, |
|
"end": 1166, |
|
"text": "(Miliani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Answer Analysis", |
|
"sec_num": "3.4." |
|
}, |
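A possible way to build the AND queries used in the residual scenario is sketched below with standard Lucene query syntax; the field names are hypothetical, since the index schema of the underlying SemplicePA platform is not detailed here.

```python
# Sketch of the boolean AND query over the Lucene index: the metadata
# extracted from the question are grouped by type and combined with AND.

def build_and_query(terms, entities, topics):
    clauses = []
    clauses += ['term:"{}"'.format(t) for t in terms]
    clauses += ['entity:"{}"'.format(e) for e in entities]
    clauses += ['topic:"{}"'.format(t) for t in topics]
    return " AND ".join(clauses)

print(build_and_query(terms=["building permit"],
                      entities=["Rome"],
                      topics=["urban planning"]))
# -> term:"building permit" AND entity:"Rome" AND topic:"urban planning"
```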
|
{ |
|
"text": "We evaluated FRAQUE on the administrative domain. In particular, we detected two frames: (i) the domain-specific TAX frame, and (ii) the EVENT frame, concerning the events taking place in a given city area, which we considered as a more general purpose frame (see Table 2 ). FRAQUE's outcomes are assessed on To test our QAS, we selected 50 questions among those gathered through the questionnaire employed in the design process (see Section 3.1.), and among the FAQ reported on several Italian Municipality web sites. More precisely, we focused on a subset of questions referring to the target frames attribute (i.e., those asking information about events and taxes) and on another subset of questions not related to them. This way, we were able to evaluate the performances of the system for the three scenarios outlined in Section 3.4. Table 2 reports the frame attributes on which the performances of FRAQUE have been assessed.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 271, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 839, |
|
"end": 846, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation And Results", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Methods of payment Cost Table 2 : Attributes of the EVENT and TAX frames.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 31, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Where Deadline When", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We evaluated FRAQUE on its ability to return (i) The right answer type; (ii) The right answer content. For what concern the first point, the goal is to assess whether the system is able to return the expected output type based on the scenarios described in Section 3.4. (i.e., attribute, frame, and residual). Traditional test accuracy metrics were employed, like F 1 score, which takes into consideration the overlap between the system outcomes and the correct answer type for each question.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Where Deadline When", |
|
"sec_num": null |
|
}, |
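For reference, the macro- and micro-averaged scores of Table 3 can be computed with standard tooling, as in the following sketch (the gold and predicted scenario labels are made up for illustration; the real evaluation uses the 50 test questions):

```python
from sklearn.metrics import precision_score, recall_score, f1_score

gold = ["attribute", "attribute", "frame", "residual", "frame", "attribute"]
pred = ["attribute", "frame",     "frame", "residual", "frame", "attribute"]

# macro averages the per-class scores; micro pools all decisions together.
for avg in ("macro", "micro"):
    print(avg,
          round(precision_score(gold, pred, average=avg, zero_division=0), 2),
          round(recall_score(gold, pred, average=avg, zero_division=0), 2),
          round(f1_score(gold, pred, average=avg, zero_division=0), 2))
```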
|
{ |
|
"text": "F1-SCORE macroAVG 0.69 0.57 0.61 microAVG 0.72 0.72 0.72 Table 3 : Performances of FRAQUE for what concern the right answer type returned according to the detected scenario associated with the question. Table 3 shows relatively low results for recall. Such a score is affected by the cases in which FRAQUE could not provide an answer to the question due to several reasons including (i) the absence of the information requested by the user and (ii) its ability to find the proper match within the question and the documents frames. With regard to the second point (i.e., the answer content) a different evaluation was performed. A domain expert was asked to decide whether the returned snippets or documents (according to the detected scenario) contain the right answer to the questions. The metrics we used differ from one scenario to another (see Section 3.4.). Table 4 : Evaluation of the content of the answers returned by FRAQUE according to the detected scenario. In the frame scenario, the system detected a frame, but no attribute related to it. In the attribute scenario, FRAQUE extracted at least a frame and an attribute from the text of the question. In the residual scenario, no frame could be extracted from the question text.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 210, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 864, |
|
"end": 871, |
|
"text": "Table 4", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PRECISION RECALL", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When the question can be associated with an attribute, as in the first scenario, we employed the Mean Reciprocal Rank (MRR). MRR is a metric introduced in the TREC Q/A track in 1999 for factoid question answering system evaluation (Jurafsky and Martin, 2019) . For a set of questions N , it was computed on a short list of snippets containing possible answers, ranked by SS or DS (see Section 3.2.). Each question is then scored according to the reciprocal of the rank of the first correct answer. Given a set of questions Q:", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 258, |
|
"text": "(Jurafsky and Martin, 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PRECISION RECALL", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "M RR = 1 |Q| |Q| i=1 1 ranki (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PRECISION RECALL", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where rank i refers to the rank position of the first relevant document for the i th query. As for the attribute scenario, in table 5 we report a deeper evaluation over the various attributes. Table 5 : Evaluation of the answers to the questions related to EVENT and TAX frame attributes, according to the attribute scenario. The score is computed on the returned snippets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 200, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "PRECISION RECALL", |
|
"sec_num": null |
|
}, |
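Equation (2) translates directly into code; the only assumption in the sketch below is the convention that a question with no correct answer in the returned list (marked here with rank 0) contributes nothing to the sum.

```python
# Direct implementation of Equation (2): the Mean Reciprocal Rank over a set
# of questions, given the rank of the first correct answer for each question.

def mean_reciprocal_rank(ranks):
    # ranks: one entry per question; 0 marks "no correct answer returned"
    return sum(1.0 / r for r in ranks if r > 0) / len(ranks)

# e.g., first correct answers at ranks 1, 2 and 4, plus one unanswered question
print(mean_reciprocal_rank([1, 2, 4, 0]))  # (1 + 0.5 + 0.25 + 0) / 4 = 0.4375
```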
|
{ |
|
"text": "It is important to notice that such results are highly affected by the cost attribute, for which the system was not able to find correct answers. Such errors are mainly due to a wrong indexed snippet for the corresponding attribute. Because of the high number of municipality acts stored in our database, most of the events have been extracted from this kind of documents. In most cases, these acts report how much the municipality spent to fund the events, instead of the ticket cost of the event. It is clear that we expect completely different results by evaluating the system on a knowledge base where information related to events is mainly extracted from social media posts, where the price of the ticket to participate in a certain event is usually specified. In the frame scenario, the given question could not be associated with any attribute, so the documents containing rele-vant snippets for the detected frame are returned. Here, the MRR is calculated on the list of documents ranked by the number of the relevant snippets extracted from them and associated with the frame attributes. Table 6 shows the results for each frame concerning this scenario.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1098, |
|
"end": 1105, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "EVENT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "0.50 TAX 1 macroAVG 0.75 Table 6 : Evaluation of the answers to questions related to EVENT and TAX frames, according to the frame scenario. The score is computed on the returned documents.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 32, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "FRAME MRR EVENT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The low performance of the system in retrieving the information related to the EVENT frame is mainly caused by some features of the indexing process. TFD indexes a document only if it contains information relevant for at least one attribute. For this reason, even though the TFD stored an event in the graph, no document may be associated with it and thus returned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FRAME MRR EVENT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the residual scenario, no frame is associated to the question and the system queries a Lucene database with indomain terms, entities, and topics extracted from the question text. In this case, FRAQUE returns up to 5 documents. Since the results are not ranked, the system performance was evaluated considering if at least one of the returned documents was actually relevant for the question. The employed evaluation metric is a variant of the precision: we considered as true positive only those cases where FRAQUE returned at least a relevant document for each query (seeTable 4). We decided to consider this metric also taking into consideration the QAS usage context, where the real goal is to guarantee that the information the user needs is among the returned documents. The results showed that, in some cases, the queries returned no answers. On the one hand, this happens because we decided to maximize the quality of the returned results by employing AND queries in querying the Lucene database. Specifically, output documents were required to contain all (or pre-defined groups) of the relevant metadata identified in the text of the question. However, this way, the system never retrieves documents containing different combinations of terms, entities or topics extracted from the question. On the other hand, the errors are caused by the absence of documents related to the question topic. By evaluating FRAQUE without considering questions for which the Lucene database does not contain the needed information, the precision increases by 29%, reaching overall a performance of 0, 76.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "FRAME MRR EVENT", |
|
"sec_num": null |
|
}, |
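The precision variant used for the residual scenario (a query counts as a true positive if at least one of the up-to-5 unranked returned documents is relevant) can be sketched as follows, with hypothetical relevance judgments:

```python
# Sketch of the "at least one relevant document" precision variant.

def at_least_one_relevant_precision(results):
    """results: list of lists of booleans, one inner list per query,
    marking whether each returned document is relevant."""
    hits = sum(1 for docs in results if any(docs))
    return hits / len(results)

# Three queries: the first two return at least one relevant document,
# the third returns none.
print(at_least_one_relevant_precision(
    [[False, True, False], [True], [False, False]]))  # -> 2/3 = 0.67
```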
|
{ |
|
"text": "In this paper we introduced FRAQUE, a question answering system based on semantic frames. FRAQUE structures textual data into frames so that they can be queried by means of natural language. This solution is based on an IE module for document analysis, namely the TFD (Miliani et al., 2019) , allowing for the indexing of documents by text frames. Given this kind of metadata, FRAQUE is able to detect correct answers contained into document snippets and to associate them to frame attributes stored in a KG. FRAQUE has been integrated into a Dialogue Management System (DMS) as the question answering component of a chatbot, designed to give information about Italian Public Administrations. However, in-domain linguistic analysis and resources in FRAQUE are easily portable to other domains, thanks to its statistical components, such as word embeddings, adopted in the query expansion module. We evaluated FRAQUE in several real case scenarios obtaining encouraging results. The results calculated over the frames annotated with the TFD module reach an average MRR of 0, 667, whereas FRAQUE reaches a 0, 59 precision score in those questions not answered exploiting frames. Of course, there is still room for improvement, but if we consider only the cases where TFD performs well, FRAQUE reaches even higher results. By looking at these outcomes, we are led to believe that improving the TFD performances, the FRAQUE's ones can be drastically improved as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 290, |
|
"text": "(Miliani et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In the near future we plan to compare the obtained results with those of available related systems, at least on the first of the scenarios detected, where document snippets are returned as answer. Moreover, further development of our work will focus on the conversion of FRAQUE thesaurus to open standards, such as the Resource Description Framework (RDF), with the consequent adaptation of FRAQUE modules to this data model. This could ease the application of FRAQUE on existing resources, as well as facilitate other frameworks to exploit FRAQUE in-domain thesaurus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "https://gate.ac.uk/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://lucene.apache.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research has been supported by the Project \"SEM il Chattadino\" (SEM), funded by Regione Toscana (POR CreO Fesr 2014-2020). Besides the CoLing Lab, project partners include the companies ETI3 s.r.l. (coordinator), BNova s.r.l. and Rigel Engineering s.r.l.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Semantic parsing on freebase from questionanswer pairs", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Chou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Frostig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berant, J., Chou, Andrew Frostig, R., and Liang, P. (2013). Semantic parsing on freebase from question- answer pairs. In Proceedings of the 2013 Conference on Empirical methods in natural language processing (EMLNP).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "CoreNLP-it: A UD pipeline for Italian based on Stanford CoreNLP", |
|
"authors": [ |
|
{ |
|
"first": "Passaro", |
|
"middle": [], |
|
"last": "Bondielli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lenci", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CliCit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bondielli, Passaro, and Lenci. (2018). CoreNLP-it: A UD pipeline for Italian based on Stanford CoreNLP. In CliC- it 2018.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An analysis of the askmsr question-answering system", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Brill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Banko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of conference on Empirical methods in natural language processing (EMLNP). Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brill, E., Dumais, S., and Banko, M. (2002). An analysis of the askmsr question-answering system. In Proceed- ings of conference on Empirical methods in natural lan- guage processing (EMLNP). Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Algoritmi su carta", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Carloni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "363--392", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carloni, E. (2019). Algoritmi su carta. politiche di dig- italizzazione e trasformazione digitale delle amminis- trazioni. Diritto pubblico, 25(2):363-392.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Tokensregex: Defining cascaded regular expressions over tokens", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chang, A. X. and Manning, C. D. (2014). Token- sregex: Defining cascaded regular expressions over to- kens. Stanford University Computer Science Technical Reports. CSTR, 2:2014.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Reading wikipedia to answer open-domain questions", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Fisch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen, D., Fisch, A., Weston, J., and Bordes, A. (2017). Reading wikipedia to answer open-domain questions. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A knowledge-based approach to question-answering", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Porter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clark, P., Thompson, J., and Porter, B. (1999). A knowledge-based approach to question-answering. In Proceedings of AAAI, pages 43-51.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A simple question answering system", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Cooper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ruger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Text REtrieval Conference (TREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cooper, R. J. and Ruger, S. M. (2000). A simple ques- tion answering system. In Text REtrieval Conference (TREC).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "T2k\u02c62: a system for automatically extracting and organizing knowledge from texts", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Dell'orletta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Venturi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Cimino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Montemagni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC-2014)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2062--2070", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dell'Orletta, F., Venturi, G., Cimino, A., and Montemagni, S. (2014). T2k\u02c62: a system for automatically extract- ing and organizing knowledge from texts. In Proceed- ings of the Ninth International Conference on Language Resources and Evaluation (LREC-2014), pages 2062- 2070.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Ensemble system for part-ofspeech tagging", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Dell'orletta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of EVALITA", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dell'Orletta, F. (2009). Ensemble system for part-of- speech tagging. Proceedings of EVALITA, 9:1-8.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M.-W", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. (2019). Bert: Pre-training of deep bidirectional trans- formers for language understanding. In NAACL HLT, pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Research and reviews in question answering system", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Dwivedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Procedia Technology", |
|
"volume": "1", |
|
"issue": "10", |
|
"pages": "417--424", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dwivedi, S. K. and Singh, V. (2013). Research and re- views in question answering system. Procedia Technol- ogy, 1(10):417-424.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Paraphrasedriven learning for open question answering", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Fader", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fader, Anthony, Z. L. and Etzioni, O. (2013). Paraphrase- driven learning for open question answering. In Pro- ceedings of the 51st Annual Meeting of the Association for Computational Linguistics. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Build watson: an overview of deepqa for the jeopardy! challenge", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ferrucci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of 19th International Conference on Parallel Architectures and Compilation Techniques (PACT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ferrucci, D. (2010). Build watson: an overview of deepqa for the jeopardy! challenge. In Proceedings of 19th International Conference on Parallel Architectures and Compilation Techniques (PACT). IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Designing for usability: key principles and what designers think", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Gould", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "Communications of the ACM", |
|
"volume": "25", |
|
"issue": "3", |
|
"pages": "300--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gould, J. D. and Lewis, C. (1985). Designing for usabil- ity: key principles and what designers think. Communi- cations of the ACM, 25(3):300-311.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Baseball: an automatic question-answerer", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"F J" |
|
], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Chomsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Laughery", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1961, |
|
"venue": "Awestern joint IRE-AIEE-ACM computer conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Green, B. F. J., Wolf, A. K., Chomsky, C., and Laughery, K. (1961). Baseball: an automatic question-answerer. In Awestern joint IRE-AIEE-ACM computer conference. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Word embedding approach for synonym extraction of multi-word terms", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hazem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Daille", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hazem, A. and Daille, B. (2018). Word embedding ap- proach for synonym extraction of multi-word terms. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Question answering in webclopedia", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Gerber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Junk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C.-Y", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Text REtrieval Conference (TREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hovy, E., Gerber, L., Hermjakob, U., Junk, M., , and Lin, C.-Y. (2000). Question answering in webclopedia. In Text REtrieval Conference (TREC).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Speech and language processing", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jurafsky, D. and Martin, J. H. (2019). Speech and language processing. Third edition draft on webpage: https: //web.stanford.edu/\u02dcjurafsky/slp3/. Ac- cessed: 3 July 2019.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "An exploration of the principles underlying redundancy-based factoid question answering", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACM Transactions on Information Systems (TOIS)", |
|
"volume": "25", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lin, J. (2007). An exploration of the principles underly- ing redundancy-based factoid question answering. ACM Transactions on Information Systems (TOIS), 25(2):6.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The stanford corenlp natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manning, C., Surdeanu, M., Bauer, J., Finkel, J., Bethard, S., and McClosky, D. (2014). The stanford corenlp nat- ural language processing toolkit. In Proceedings of 52nd annual meeting of the association for computational lin- guistics: system demonstrations, pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of NIPS 2013, 26th Conference on Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "171--178", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikolov, T., Sutskever, I., Chen, K., Corrado, G. S., and Dean, J. (2013). Distributed representations of words and phrases and their compositionality. In Proceedings of NIPS 2013, 26th Conference on Advances in Neural Information Processing Systems, pages 171-178, Lake Tahoe, Nevada, USA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Semplicepa: Semantic instruments for public administrators and citizen", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Miliani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Passaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gabbolini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Leci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Battistelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "GARR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miliani, M., Passaro, L., Gabbolini, A., Passaro, L., Leci, A., and Battistelli, R. (2017). Semplicepa: Seman- tic instruments for public administrators and citizen. In GARR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Text frame detector: Slot filling based on domain knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Miliani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Passaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings CliC-it 2019, 6th Italian Conference of Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miliani, M., Passaro, L. C., and Lenci, A. (2019). Text frame detector: Slot filling based on domain knowledge bases. In Proceedings CliC-it 2019, 6th Italian Confer- ence of Computational Linguistics, Bari.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A framework for representing knowl", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Minsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1974, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minsky, M. (1974). A framework for representing knowl- edge. Massachusetts Institute of Technology, Cam- bridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Answer filtering via text categorization in question answering systems", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of 15th IEEE International Conference on Tools with Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moschitti, A. (2003). Answer filtering via text categoriza- tion in question answering systems. In Proceedings of 15th IEEE International Conference on Tools with Arti- ficial Intelligence. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Universal dependencies 1.2. LIN-DAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL)", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Faculty of Mathematics and Physics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nivre, J. e. a. (2015). Universal dependencies 1.2. LIN- DAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL), Faculty of Mathemat- ics and Physics, Charles University.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A review of question answering systems", |
|
"authors": [ |
|
{ |
|
"first": "Bolanle", |
|
"middle": [], |
|
"last": "Ojokoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ans Adebisi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Web Engineering", |
|
"volume": "17", |
|
"issue": "8", |
|
"pages": "717--758", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ojokoh, Bolanle ans Adebisi, E. (2018). A review of ques- tion answering systems. Journal of Web Engineering, 17(8):717-758.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Extracting terms with Extra. Computerised and Corpus-based Approaches to Phraseology: Monolingual and Multilingual Perspectives", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Passaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Passaro, L. C. and Lenci, A. (2016). Extracting terms with Extra. Computerised and Corpus-based Approaches to Phraseology: Monolingual and Multilingual Perspec- tives, pages 188-196.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Informed pa: A ner for the italian public administration domain", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Passaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gabbolini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Fourth Italian Conference on Computational Linguistics CLiC-it", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "246--251", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Passaro, L. C., Lenci, A., and Gabbolini, A. (2017). In- formed pa: A ner for the italian public administration domain. In Fourth Italian Conference on Computational Linguistics CLiC-it, pages 246-251.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Open-Domain Question Answering from Large Text Collections", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pa\u015fca", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pa\u015fca, M. (2003). Open-Domain Question Answering from Large Text Collections. CSLI.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Question answering system, approaches and techniques: A review", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Pundge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Khillare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Namrata", |
|
"middle": [], |
|
"last": "Mahender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Journal of Computer Applications", |
|
"volume": "141", |
|
"issue": "3", |
|
"pages": "975--8887", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pundge, A. M., Khillare, S. A., and Namrata Mahender, C. (2016). Question answering system, approaches and techniques: A review. International Journal of Com- puter Applications, 141(3):0975-8887.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning surface text patterns for a question answering system", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ravichandran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Association for Computational Linguistics conference (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ravichandran, D. and Hovy, E. (2002). Learning surface text patterns for a question answering system. In Asso- ciation for Computational Linguistics conference (ACL). Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "The Knowledge Graph structure employed by the TFD(Miliani et al., 2019)." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Example of a snippet expressing an instance of the TAX frame. It contains relevant information for both the deadline and the methods of payment attributes." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Can I [pay] payment the [Municipality Tax] tax with [postal order] payment\u2212f orm ? Example of a user question. The tagged tokens are attribute triggers and tags correspond to their standard forms." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "The [Municipality Tax] tax [disbursement] payment must be made through [wire transfer] payment\u2212f orm or [postal order] payment\u2212f orm in two [installments] sum : [down payment] sum by [June 18 th ] date and [balance] sum by [December 17 th ] date .", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>reports the</td></tr><tr><td>FRAQUE's performances according to each scenario.</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |