|
{ |
|
"paper_id": "U15-1010", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:10:06.349273Z" |
|
}, |
|
"title": "Domain Adaption of Named Entity Recognition to Support Credit Risk Assessment", |
|
"authors": [ |
|
    {
      "first": "Julio Cesar",
      "middle": [],
      "last": "Salinas Alvarado",
      "suffix": "",
      "affiliation": {
        "laboratory": "",
        "institution": "The University of Melbourne",
        "location": {}
      },
      "email": "[email protected]"
    },
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
      "middle": [],
|
"last": "Baldwin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne", |
|
"location": {} |
|
}, |
|
"email": "" |
|
    }
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
  "abstract": "Risk assessment is a crucial activity for financial institutions because it helps them to determine the amount of capital they should hold to assure their stability. Flawed risk assessment models could return erroneous results that trigger a misuse of capital by banks and in the worst case, their collapse. Robust models need large amounts of data to return accurate predictions, the source of which is text-based financial documents. Currently, bank staff extract the relevant data by hand, but the task is expensive and time-consuming. This paper explores a machine learning approach for information extraction of credit risk attributes from financial documents, modelling the task as a named-entity recognition problem. Generally, statistical approaches require labelled data to learn the models, however the annotation task is expensive and tedious. We propose a solution for domain adaption for NER based on out-of-domain data, coupled with a small amount of in-domain data. We also developed a financial NER dataset from publicly-available financial documents.",
|
"pdf_parse": { |
|
"paper_id": "U15-1010", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
        "text": "Risk assessment is a crucial activity for financial institutions because it helps them to determine the amount of capital they should hold to assure their stability. Flawed risk assessment models could return erroneous results that trigger a misuse of capital by banks and in the worst case, their collapse. Robust models need large amounts of data to return accurate predictions, the source of which is text-based financial documents. Currently, bank staff extract the relevant data by hand, but the task is expensive and time-consuming. This paper explores a machine learning approach for information extraction of credit risk attributes from financial documents, modelling the task as a named-entity recognition problem. Generally, statistical approaches require labelled data to learn the models, however the annotation task is expensive and tedious. We propose a solution for domain adaption for NER based on out-of-domain data, coupled with a small amount of in-domain data. We also developed a financial NER dataset from publicly-available financial documents.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In the years 2007-2008, the GFC (Global Financial Crisis) affected a vast number of countries around the world, causing losses of around USD$33 trillion and the collapse of big-name banks (Clarke, 2010) . Experts identified that one of the main causes of the GFC was the use of poor financial models in risk assessment (Clarke, 2010; news.com.au, 2010; Debelle, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 202, |
|
"text": "(Clarke, 2010)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 333, |
|
"text": "(Clarke, 2010;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 352, |
|
"text": "news.com.au, 2010;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 367, |
|
"text": "Debelle, 2009)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Risk assessment helps banks to estimate the amount of capital they should keep at hand to promote their stability and at the same time to protect their clients. Poor risk assessment models tend to overestimate the capital required, leading banks to make inefficient use of their capital, or underestimate the capital required, which could lead to banks collapsing in a financial crisis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Financial documents such as contracts and loan agreements provide the information required to perform the risk assessment. These texts hold relevant details that feed into the assessment process, including: the purpose of the agreement, amount of loan, and value of collateral. Figure 1 provides a publicly available example of a loan agreement, as would be used in risk assessment.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 286, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Currently, bank staff manually extract the information from such financial documents, but the task is expensive and time-consuming for three main reasons: (1) all documents are in unstructured, textual form; (2) the volume of \"live\" documents is large, numbering in the millions of documents for a large bank; and (3) banks are continuously adding new information to the risk models, meaning that they potentially need to extract new fields from old documents they have previously analyzed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Natural language processing (NLP) potentially offers the means to semi-automatically extract information required for risk assessment, in the form of named entity recognition (NER) over fields of interest in the financial documents. However, while we want to use supervised NER models, we also want to obviate the need for large-scale annotation of financial documents. The primary focus of this paper is how to build supervised NER models to extract information from financial agreements based on pre-existing out-of-domain data with partially-matching labelled data, and small amounts of in-domain data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are few public datasets in the financial domain, due to the privacy and commercial value of the data. In the interest of furthering research on information extraction in the financial domain, we Figure 1 : Example of a loan agreement. Relevant information that is used by risk assessment models is highlighted. The example is taken from a loan agreement that has been disclosed as part of an SEC hearing, available at http://www.sec.gov/Archives/edgar/data/1593034/ 000119312514414745/d817818dex101.htm construct an annotated dataset of public-domain financial agreements, and use this as the basis of our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 209, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper describes an approach for domain adaption that includes a small amount of target domain data into the source domain data. The results obtained encourage the use of this approach in cases where the amount of target data is minimal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most prior approaches to information extraction in the financial domain make use of rule-based methods. Farmakiotou et al. (2000) extract entities from financial news using grammar rules and gazetteers. This rule-based approach obtained 95% accuracy overall, at a precision and recall of 78.75%. Neither the number of documents in the corpus nor the number of annotated samples used in the work is mentioned, but the number of words in the corpus is 30,000 words for training and 140,000 for testing. The approach involved the creation of rules by hand; this is a time-consuming task, and the overall recall is low compared to other extraction methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 129, |
|
"text": "Farmakiotou et al. (2000)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another rule-based approach was proposed by Sheikh and Conlon (2012) for extracting information from financial data (combined quarterly reports from companies and financial news) with the aim of assisting in investment decision-making.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 68, |
|
"text": "Sheikh and Conlon (2012)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The rules were based on features including exact word match, part-of-speech tags, orthographic features, and domain-specific features. After creating a set of rules from annotated examples, they tried to generalize the rules using a greedy search algorithm and also the Tabu Search algorithm. They obtained the best performance of 91.1% precision and 83.6% recall using the Tabu Search algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The approach of Farmakiotou et al. 2000is similar to our approach in that they tried to address an NER problem with financial data. However, their data came from financial news rather than the financial agreements, as targeted in our work. The focus of Sheikh and Conlon (2012) is closer to that in this paper, in that they make use of both financial news and corporate quarterly reports. However, their extraction task does not consider financial contracts, which is the key characteristic of our problem setting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 277, |
|
"text": "Sheikh and Conlon (2012)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Somewhat further afield -but related in the sense that financial agreements stipulate the legal terms of a financial arrangement -is work on information extraction in the legal domain. Moens et al. (1999) used information extraction to obtain relevant details from Belgian criminal records with the aim of generating abstracts from them. The approach takes advantage of discourse analysis to find the structure of the text and linguistic forms, and then creates text grammars. Finally, the approach uses a parser to process the document content. Although the authors do not present results, they argue that when applied to a test set of 1,000 criminal cases, they were able to identify the required information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 204, |
|
"text": "Moens et al. (1999)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In order to reduce the need for annotation, we explore domain adaptation of an information extraction system using out-of-domain data and a small amount of in-domain data. Domain adaptation for named entity recognition techniques has been explored widely in recent years. For instance, Jiang and Zhai (2006) approached the problem by generalizing features across the source and target domain to way avoid overfitting. Mohit and Hwa (2005) proposed a semi-supervised method combining a naive Bayes classifier with the EM algorithm, applied to features extracted from a parser, and showed that the method is robust over novel data. Blitzer et al. (2006) induced a correspondence between features from a source and target domain based on structural correspondence learn-ing over unlabelled target domain data. Qu et al. (2015) showed that a graph transformer NER model trained over word embeddings is more robust cross-domain than a model based on simple lexical features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 307, |
|
"text": "Jiang and Zhai (2006)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 438, |
|
"text": "Mohit and Hwa (2005)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 651, |
|
"text": "Blitzer et al. (2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 823, |
|
"text": "Qu et al. (2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our approach is based on large amounts of labelled data from a source domain and small amounts of labelled data from the target domain (i.e. financial agreements), drawing inspiration from previous research that has shown that using a modest amount of labelled in-domain data to perform transfer learning can substantially improve classifier accuracy (Duong et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 371, |
|
"text": "(Duong et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Named entity recognition (NER) is the task of identifying and classifying token-level instances of named entities (NEs), in the form of proper names and acronyms of persons, places or organizations, as well as dates and numeric expressions in text (Cunningham, 2005; Abramowicz and Piskorski, 2003; Sarawagi, 2008) . In the financial domain, example NE types are LENDER, BORROWER, AMOUNT, and DATE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 266, |
|
"text": "(Cunningham, 2005;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 298, |
|
"text": "Abramowicz and Piskorski, 2003;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 314, |
|
"text": "Sarawagi, 2008)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We build our supervised NER models using conditional random fields (CRFs), a popular approach to sequence classification (Lafferty et al., 2001; Blunsom, 2007) . CRFs model the conditional probability p(s|o) of labels (states) s given the observations o as in Equation 1, where t is the index of words in observation sequence o, each k is a feature, w k is the weight associated with the feature k, and Z w (o) is a normalization constant.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 144, |
|
"text": "(Lafferty et al., 2001;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 159, |
|
"text": "Blunsom, 2007)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(s|o) = exp( t k w k f k (s t\u22121 , s t , o, t)) Z w (o)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "4 Methods", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In order to evaluate NER over financial agreements, we annotated a dataset of financial agreements made public through U.S. Security and Exchange Commission (SEC) filings. Eight documents (totalling 54,256 words) were randomly selected for manual annotation, based on the four NE types provided in the CoNLL-2003 dataset: LOCATION (LOC), ORGANISATION (ORG), PERSON (PER), and MISCELLANEOUS (MISC). The annotation was carried out using the Brat annotation tool (Stenetorp et al., 2012) . All documents were pre-tokenised and part-of-speech (POS) tagged using NLTK (Bird et al., 2009) . As part of the annotation, we automatically tagged all instances of the tokens lender and borrower as being of entity type PER. We have made this dataset available in CoNLL format for research purposes at: http://people.eng.unimelb.edu.au/ tbaldwin/resources/finance-sec/.", |
|
"cite_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 484, |
|
"text": "(Stenetorp et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 582, |
|
"text": "(Bird et al., 2009)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the training set, we use the CoNLL-2003 English data, which is based on Reuters newswire data and includes part-of-speech and chunk tags (Tjong Kim Sang and De Meulder, 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 178, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The eight financial agreements were partitioned into two subsets of five and three documents, which we name \"FIN5\" and \"FIN3\", respectively. The former is used as training data, while the latter is used exclusively for testing. Table 1 summarizes the corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 235, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For all experiments, we used the CRF++ toolkit (Kudo, 2013), with the following feature set (optimized over the CoNLL-2003 development set):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Word features: the word itself; whether the word starts with an upper case letter; whether the word has any upper case letters other than the first letter; whether the word contains digits or punctuation symbols; whether the word has hyphens; whether the word is all lower or upper case. \u2022 Word shape features: a transformation of the word, changing upper case letters to X, lower case letters to x, digits to 0 and symbols to #. \u2022 Penn part-of-speech (POS) tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Stem and lemma.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Suffixes and Prefixes of length 1 and 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
        "text": "We first trained and tested directly on the CoNLL-2003 data, resulting in a model with a precision of 0.833, recall of 0.824 and F1-score of 0.829 (Experiment1), competitive with the state-of-the-art for the task.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup and Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
        "text": "The next step was to experiment with the financial data. For that, first we applied the CoNLL-2003 model directly to FIN3. Then, in order to improve the results for the domain adaption, we trained a new model using the CONLL +FIN5 data set, and tested this model against the FIN3 dataset.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup and Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "A summary of the experimental results over the financial data sets is presented in Table 2 . Table 2 summarizes the results of directly applying the model obtained by training only over out-of-domain data to the two financial data sets. The difference in the domain composition of the CONLL data (news) and the financial documents can be observed in these results. With out-ofdomain test data, a precision of 0.247 and a recall of 0.132 (Experiment2) was observed, while testing with in-domain data achieved a precision of 0.833 and recall of 0.824 (Experiment1). As a solution to the difference in the nature of the sources in the context of limited annotated in-domain data, we experimented with simple domain adaptation, by including into the source domain (CONLL) data a small amount of the target domain data -i.e. including data from FIN5-generating a new training data set (CONLL +FIN5). When trained over this combined data set, the results increased substantially, obtaining a precision of 0.828, recall of 0.770 and F-score of 0.798 (Experiment3).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 90, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 100, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup and Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "As additional analysis, in Figure 2 , we plot learning curves based on F-score obtained for Ex-periment2 and Experiment3 as we increase the training set (in terms of the number of sentences). We can see that the F-score increases slightly with increasing amounts of pure CONLL data (Exper-iment2), but that in the case of the mixed training data (Experiment3), the results actually drop as we add more CONLL data. Figure 3 shows the learning curves for Experi-ment3 and Experiment4, as we add more financial data. Here, in the case of Experiment3, we start out with all of the CONLL data, and incrementally add FIN5. We can see that the more financial data we add, the more the F-score improves, with a remarkably constant absolute difference in F-score between the two experiments for the same amount of in-domain data. That is, even for as little as 100 training sentences, the CONLL data degrades the overall F-score. Confusion matrices for the results of the predictions of Experiment3 are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 35, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 422, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1010, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Analysis of the errors in the confusion matrix reveals that the entity type MISC has perfect recall over the financial dataset. Following MISC, PER is the entity type with the next best recall, at over 0.9. However, generally the model tends to suffer from a high rate of false positives for the entities LOC and ORG, affecting the precision of those classes and the overall performance of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "One interesting example of error in the output of the model is when the tokens refer to an address. One example is the case of 40 Williams Street, where the correct label is LOC but the model predicts the first token (40) to be NANE and the other two tokens to be an instance of PER (i.e. Williams Street is predicted to be a person).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the model generated with just the CONLL data, one notable pattern is consistent false positives on tokens with initial capital letters; for example, the model predicts both Credit Extensions and Repayment Period to be instances of ORG, though in the gold standard they don't belong to any entity type. This error was reduced drastically through the addition of the in-domain financial data in training, improving the overall performance of the model. Ultimately, the purely in-domain training stratagem in Experiment4 outperforms the mixed data setup (Experiment3), indicating that domain context is critical for the task. Having said that, the results of our study inform the broader question of out-of-domain applicability of NER models. Furthermore, they point to the value of even a small amount of in-domain training data (Duong et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 830, |
|
"end": 850, |
|
"text": "(Duong et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Risk assessment is a crucial task for financial institutions such as banks because it helps to estimate the amount of capital they should hold to promote their stability and protect their clients. Manual extraction of relevant information from text- Table 3 : Confusion matrix for the predictions over FIN3 using the model from Experiment3, including the precision and recall for each class (\"NANE\" = Not a Named Entity).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 257, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
        "text": "based financial documents is expensive and time-consuming.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We explored a machine learning approach that modelled the extraction task as a named entity recognition task. We used a publicly available non-financial dataset as well as a small number of annotated publicly available financial documents. We used a conditional random field (CRF) to label entities. The training process was based on data from CoNLL-2003 which had annotations for the entity types PER (person), MISC (miscellaneous), ORG (organization) and LOC (location). We then assembled a collection of publiclyavailable loan agreements, and manually annotated them, to serve as training and test data. Our experimental results showed that, for this task and our proposed approach, small amounts of indomain training data are superior to large amounts of out-of-domain training data, and furthermore that supplementing the in-domain training data with out-of-domain data is actually detrimental to overall performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
        "text": "In future work, we intend to test this approach using different datasets with an expanded set of entity types specific to credit risk assessment, such as values and dates. An additional step would be to carry out extrinsic evaluation of the output of the model in an actual credit risk assessment scenario. As part of this, we could attempt to identify additional features for risk assessment, beyond what is required by the financial authorities.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Information extraction from free-text business documents", |
|
"authors": [ |
|
{ |
|
"first": "Witold", |
|
"middle": [], |
|
"last": "Abramowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Effective Databases for Text & Document Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Witold Abramowicz and Jakub Piskorski. 2003. In- formation extraction from free-text business doc- uments. In Stephanie Becker, editor, Effective Databases for Text & Document Management, pages 12-23. IRM Press.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Natural Language Processing with Python -Analyzing Text with the Natural Language Toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ewan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural Language Processing with Python -An- alyzing Text with the Natural Language Toolkit. O'Reilly Media, Sebastopol, USA.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Domain adaptation with structural correspondence learning", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--128", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Blitzer, Ryan McDonald, and Fernando Pereira. 2006. Domain adaptation with structural correspon- dence learning. In Proceedings of the 2006 Con- ference on Empirical Methods in Natural Language Processing, pages 120-128, Sydney, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Structured classification for multilingual natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Blunsom. 2007. Structured classification for multilingual natural language processing. Ph.D. thesis, University of Melbourne Melbourne, Aus- tralia.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Recurring crises in Anglo-American corporate governance", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Contributions to Political Economy", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "9--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Clarke. 2010. Recurring crises in Anglo- American corporate governance. Contributions to Political Economy, 29(1):9-32.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Information extraction, automatic", |
|
"authors": [ |
|
{ |
|
"first": "Hamish", |
|
"middle": [], |
|
"last": "Cunningham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Encyclopedia of Language and Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "665--677", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamish Cunningham. 2005. Information extraction, automatic. In Encyclopedia of Language and Lin- guistics, pages 665-677. Elsevier, 2nd edition. Guy Debelle. 2009. Some effects of the global financial crisis on australian financial mar- kets. http://www.rba.gov.au/speeches/ 2009/sp-ag-310309.html.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "What can we get from 1000 tokens? a case study of multilingual POS tagging for resource-poor languages", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Duong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "886--897", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Duong, Trevor Cohn, Karin Verspoor, Steven Bird, and Paul Cook. 2014. What can we get from 1000 tokens? a case study of multilingual POS tag- ging for resource-poor languages. In Proceedings of the 2014 Conference on Empirical Methods in Nat- ural Language Processing, pages 886-897, Doha, Qatar.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Rule-based named entity recognition for Greek financial texts", |
|
"authors": [ |
|
{ |
|
"first": "Dimitra", |
|
"middle": [], |
|
"last": "Farmakiotou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vangelis", |
|
"middle": [], |
|
"last": "Karkaletsis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Koutsias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Sigletos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Constantine", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Spyropoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panagiotis", |
|
"middle": [], |
|
"last": "Stamatopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the Workshop on Computational lexicography and Multimedia Dictionaries (COM-LEX 2000)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "75--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitra Farmakiotou, Vangelis Karkaletsis, John Kout- sias, George Sigletos, Constantine D. Spyropoulos, and Panagiotis Stamatopoulos. 2000. Rule-based named entity recognition for Greek financial texts. In Proceedings of the Workshop on Computational lexicography and Multimedia Dictionaries (COM- LEX 2000), pages 75-78, Patras, Greece.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Exploiting domain structure for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengxiang", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Main Conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Jiang and ChengXiang Zhai. 2006. Exploiting domain structure for named entity recognition. In Proceedings of the Main Conference on Human Lan- guage Technology Conference of the North Amer- ican Chapter of the Association of Computational Linguistics, pages 74-81, New York, USA. Taku Kudo. 2013. CRF++: Yet another CRF toolkit. https://taku910.github.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 18th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data. In Proceedings of the 18th Interna- tional Conference on Machine Learning, pages 282- 289, Williamstown, USA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Information extraction from legal texts: the potential of discourse analysis", |
|
"authors": [ |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Uyttendaele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos", |
|
"middle": [], |
|
"last": "Dumortier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "International Journal of Human-Computer Studies", |
|
"volume": "51", |
|
"issue": "6", |
|
"pages": "1155--1171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marie-Francine Moens, Caroline Uyttendaele, and Jos Dumortier. 1999. Information extraction from le- gal texts: the potential of discourse analysis. In- ternational Journal of Human-Computer Studies, 51(6):1155-1171.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Syntaxbased semi-supervised named entity tagging", |
|
"authors": [ |
|
{ |
|
"first": "Behrang", |
|
"middle": [], |
|
"last": "Mohit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL 2005 on Interactive Poster and Demonstration Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Behrang Mohit and Rebecca Hwa. 2005. Syntax- based semi-supervised named entity tagging. In Proceedings of the ACL 2005 on Interactive Poster and Demonstration Sessions, pages 57-60, Ann Ar- bor, USA. news.com.au. 2010. Poor risk assessment 'led to global financial crisis'. http://goo.gl/ f92sv8. Accessed 10 Nov, 2015.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Big data small data, in domain out-of domain, known word unknown word: The impact of word representations on sequence labelling tasks", |
|
"authors": [ |
|
{ |
|
"first": "Lizhen", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriela", |
|
"middle": [], |
|
"last": "Ferraro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyuan", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Hou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 19th Conference on Natural Language Learning (CoNLL-2015)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "83--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lizhen Qu, Gabriela Ferraro, Liyuan Zhou, Wei- wei Hou, Nathan Schneider, and Timothy Baldwin. 2015. Big data small data, in domain out-of do- main, known word unknown word: The impact of word representations on sequence labelling tasks. In Proceedings of the 19th Conference on Natural Lan- guage Learning (CoNLL-2015), pages 83-93, Bei- jing, China.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Information Extraction. Foundations and Trends in Databases", |
|
"authors": [ |
|
{ |
|
"first": "Sunita", |
|
"middle": [], |
|
"last": "Sarawagi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "261--377", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunita Sarawagi. 2008. Information Extraction. Foun- dations and Trends in Databases, 1(3):261-377.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A rulebased system to extract financial information", |
|
"authors": [ |
|
{ |
|
"first": "Mahmudul", |
|
"middle": [], |
|
"last": "Sheikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumali", |
|
"middle": [], |
|
"last": "Conlon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal of Computer Information Systems", |
|
"volume": "52", |
|
"issue": "4", |
|
"pages": "10--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mahmudul Sheikh and Sumali Conlon. 2012. A rule- based system to extract financial information. Jour- nal of Computer Information Systems, 52(4):10-19.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "BRAT: A web-based tool for NLPassisted text annotation", |
|
"authors": [ |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Topi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Demonstrations at the 13th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "102--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topi\u0107, Tomoko Ohta, Sophia Ananiadou, and Jun'ichi Tsu- jii. 2012. BRAT: A web-based tool for NLP- assisted text annotation. In Proceedings of the Demonstrations at the 13th Conference of the Euro- pean Chapter of the Association for Computational Linguistics, pages 102-107, Avignon, France.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of CoNLL-2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of CoNLL-2003, pages 142-147. Ed- monton, Canada.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
            "text": "Learning curves showing the F-score as more CoNLL data is added for Experiment 1 and Experiment 3. Experiment 3 starts with FIN5 and incrementally adds CoNLL data.",
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
            "text": "Learning curves showing the F-score as more financial training data is added for Experiment 3 and Experiment 4.",
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Results of testing over the financial data sets.", |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>1.0</td><td/><td/><td/><td/><td/></tr><tr><td/><td>0.8</td><td/><td/><td/><td/><td/></tr><tr><td>F1-Score</td><td>0.4 0.6</td><td/><td/><td/><td/><td/></tr><tr><td/><td>0.2</td><td/><td colspan=\"3\">Increasing Financial</td><td/></tr><tr><td/><td/><td/><td colspan=\"4\">CoNLL + increasing Financial</td></tr><tr><td/><td>0.0</td><td>100</td><td>300</td><td>500</td><td>700</td><td>900</td><td>1100</td></tr><tr><td/><td/><td/><td colspan=\"4\">Training Dataset Size (Sentences)</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |