|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:46:54.066389Z" |
|
}, |
|
"title": "Swiss-Judgment-Prediction: A Multilingual Legal Judgment Prediction Benchmark", |
|
"authors": [ |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Niklaus", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Bern Bern", |
|
"location": { |
|
"country": "Switzerland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Coastal NLP Group", |
|
"institution": "University of Copenhagen", |
|
"location": { |
|
"settlement": "Copenhagen", |
|
"country": "Denmark" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "St\u00fcrmer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Bern", |
|
"location": { |
|
"settlement": "Bern", |
|
"country": "Switzerland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In many jurisdictions, the excessive workload of courts leads to high delays. Suitable predictive AI models can assist legal professionals in their work, and thus enhance and speed up the process. So far, Legal Judgment Prediction (LJP) datasets have been released in English, French, and Chinese. We publicly release a multilingual (German, French, and Italian), diachronic (2000-2020) corpus of 85K cases from the Federal Supreme Court of Switzerland (FSCS). We evaluate state-of-the-art BERT-based methods including two variants of BERT that overcome the BERT input (text) length limitation (up to 512 tokens). Hierarchical BERT has the best performance (approx. 68-70% Macro-F1-Score in German and French). Furthermore, we study how several factors (canton of origin, year of publication, text length, legal area) affect performance. We release both the benchmark dataset and our code to accelerate future research and ensure reproducibility. 4 The dataset is not parallel, all cases are unique and decision are written only in a single language.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In many jurisdictions, the excessive workload of courts leads to high delays. Suitable predictive AI models can assist legal professionals in their work, and thus enhance and speed up the process. So far, Legal Judgment Prediction (LJP) datasets have been released in English, French, and Chinese. We publicly release a multilingual (German, French, and Italian), diachronic (2000-2020) corpus of 85K cases from the Federal Supreme Court of Switzerland (FSCS). We evaluate state-of-the-art BERT-based methods including two variants of BERT that overcome the BERT input (text) length limitation (up to 512 tokens). Hierarchical BERT has the best performance (approx. 68-70% Macro-F1-Score in German and French). Furthermore, we study how several factors (canton of origin, year of publication, text length, legal area) affect performance. We release both the benchmark dataset and our code to accelerate future research and ensure reproducibility. 4 The dataset is not parallel, all cases are unique and decision are written only in a single language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Frequently, legal information is available in textual form (e.g. court decisions, laws, legal articles or commentaries, contracts). With the abundance of legal texts comes the possibility of applying Natural Language Processing (NLP) techniques to tackle challenging tasks (Chalkidis and Kampas, 2018; Zhong et al., 2020; Chalkidis et al., 2021b) . In this work, we study the task of Legal Judgment Prediction (LJP) where the goal is to predict the outcome (verdict) of a decision given its facts (Aletras et al., 2016; \u015eulea et al., 2017; Luo et al., 2017; Chalkidis et al., 2019) . Many relevant applications and tasks, such as court opinion generation (Ye et al., 2018) and analysis (Wang et al., 2012) have been also studied, while there is also work aiming to interpret (explain) the decisions of particular courts (Ye et al., 2018; Chalkidis et al., 2021a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 301, |
|
"text": "(Chalkidis and Kampas, 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 321, |
|
"text": "Zhong et al., 2020;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 346, |
|
"text": "Chalkidis et al., 2021b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 519, |
|
"text": "(Aletras et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 539, |
|
"text": "\u015eulea et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 557, |
|
"text": "Luo et al., 2017;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 581, |
|
"text": "Chalkidis et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 672, |
|
"text": "(Ye et al., 2018)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 705, |
|
"text": "(Wang et al., 2012)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 837, |
|
"text": "(Ye et al., 2018;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 862, |
|
"text": "Chalkidis et al., 2021a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Models developed for LJP and relevant supportive tasks may assist both lawyers, e.g., help them prepare their arguments by identifying their strengths and weaknesses, and judges and clerks, e.g., review or prioritize cases, thus speeding up judicial processes and improving their quality. Especially in areas with many pending cases such as Indian 1 and Brazilian 2 jurisdictions or US immigration cases 3 the deployment of such models may drastically shorten the backlog. Such models can also help legal scholars to study case law (Katz, 2012) and help sociologists and research ethicists to expose irresponsible use of AI in the justice system (Angwin et al., 2016; Dressel and Farid, 2018) . So far, LJP datasets have been released for English (Katz et al., 2017; Medvedeva et al., 2018; Chalkidis et al., 2019) , French (\u015eulea et al., 2017) and Chinese Long et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 532, |
|
"end": 544, |
|
"text": "(Katz, 2012)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 667, |
|
"text": "(Angwin et al., 2016;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 692, |
|
"text": "Dressel and Farid, 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 766, |
|
"text": "(Katz et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 790, |
|
"text": "Medvedeva et al., 2018;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 791, |
|
"end": 814, |
|
"text": "Chalkidis et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 844, |
|
"text": "French (\u015eulea et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 875, |
|
"text": "Long et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We introduce a new multilingual, diachronic LJP dataset of FSCS cases, which spans 21 years (from 2000 to 2020) containing over 85K (50K German, 31K French and 4K Italian) cases. To the best of our knowledge, it is the only publicly available multilingual LJP dataset to date. Additionally, it is annotated with publication years, legal areas and cantons of origin; thus it can be used also as testbed for fairness and robustness in the critical application of NLP to law (Wang et al., 2021) . Rogers (2021) argues that the NLP community is investing many more resources in the development of models rather than data. As a result, there are not enough challenging, high-quality and well curated benchmarks available. Rogers assumes that the main reason for this imbalance is that the \"data work\" is considered less prestigious and top conferences are more likely to reject resource (dataset) papers. With our work (and the associated code and data) we hope to make a valuable contribution to the legal NLP field, where there are not many ready-to-use benchmarks available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 491, |
|
"text": "(Wang et al., 2021)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 507, |
|
"text": "Rogers (2021)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of this paper are threefold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contributions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We publicly release a large, high quality, curated, multilingual, diachronic dataset of 85K Swiss Federal Supreme Court (FSCS) cases annotated with the respective binarized judgment outcome (approval/dismissal), posing a challenging text classification task. We also provide additional metadata, i.e., the publication year, the legal area and the canton of origin per case, to promote robustness and fairness studies on the critical area of legal NLP (Wang et al., 2021 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 453, |
|
"end": 471, |
|
"text": "(Wang et al., 2021", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contributions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We provide experimental results with strong baselines representing the current state-of-the-art in NLP. Since the average length of the facts (850 tokens in the French part) is longer than the 512 tokens limit by BERT (Devlin et al., 2019) , special methods are needed to cope with that. We show results comparing standard BERT models (up to 512 tokens) with two variants (hierarchical and prolonged BERT) that use up to 2048 tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 241, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contributions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We analyze the results of the German dataset in terms of diachronicity (publication year), legal area and input (text) length and the French dataset by canton of origin. We find that performance deteriorates as cases are getting more complex (longer facts), while also performance varies across legal areas. There is no sign of performance fluctuation across years.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contributions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "European Court of Human Rights (ECtHR) Aletras et al. (2016) introduced a dataset of 584 ECtHR cases concerning the violation or not of three articles of the European Convention of Human Rights (ECHR) . They used a Support Vector Machine (SVM) (Cortes and Vapnik, 1995) with Bag-of-Words (BoW) (n-grams) and topical features on a simplified binarized LJP. In contrast to our work, they evaluated with random 10-fold cross-validation instead of the more realistic temporal split based on the date (S\u00f8gaard et al., 2021) . Medvedeva et al. (2018) extended the ECtHR dataset to include 9 instead of 3 Articles resulting in a total of approx. 11.5K cases. They also experimented with an SVM operating on n-grams on the LJP task. Chalkidis et al. (2019) experimented on a similarly sized dataset using neural methods. On the binary LJP task, they improve the state-of-the-art using a hierarchical version of BERT. Additionally, they experimented with a multi-label LJP task predicting for each of the 66 ECHR Articles whether it is violated or not.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 60, |
|
"text": "Aletras et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 200, |
|
"text": "Human Rights (ECHR)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 269, |
|
"text": "(Cortes and Vapnik, 1995)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 518, |
|
"text": "(S\u00f8gaard et al., 2021)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 544, |
|
"text": "Medvedeva et al. (2018)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 748, |
|
"text": "Chalkidis et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Katz et al. (2017) experimented on LJP with 28K cases from the SCOTUS spanning almost two centuries. They trained a Random Forest (Breiman, 2001 ) classifier using extensive feature engineering with many non textual features. Kaufman et al. (2019) improved results using an ADABoost (Freund and Schapire, 1997) classifier, while also incorporating more textual information (i.e., statements made by the court judges during oral arguments).", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 144, |
|
"text": "(Breiman, 2001", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 247, |
|
"text": "Kaufman et al. (2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 310, |
|
"text": "(Freund and Schapire, 1997)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supreme Court of the United States (SCOTUS)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "French Supreme Court (Court of Cassation) Sulea et al. (2017) studied the LJP task on a dataset of approx. 127K French Supreme Court cases. They experimented on a 6-class and a 8-class setting using an SVM with BoW features. They reported very high scores, which they claim are justified by the high predictability of the French Supreme Court. Although they used as input the entire case description and not only the facts, thus there is a strong possibility of label information leak. They also used 10-fold stratified crossvalidation selecting the test part at random. 3 Data Description", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supreme Court of the United States (SCOTUS)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The decisions were downloaded from the platform entscheidsuche.ch and have been pre-processed by the means of HTML parsers and Regular Expressions (RegExps). The dataset contains more than 85K decisions from the FSCS written in three languages (50K German, 31K French, 4K Italian) from the years 2000 to 2020. 4 The FSCS is the last level of appeal in Switzerland and hears only the most controversial cases which could not have been sufficiently well solved by (up to two) lower courts. In their decisions, they often focus only on small parts of previous decision, where they discuss possible wrong reasoning by the lower court. This makes these cases particularly challenging. In order to fight the reproducibility crisis (Britz, 2020) , we release the Swiss-Judgment-Prediction dataset on Zenodo 5 and on Hugging Face 6 , while also open-sourcing the complete code used for constructing the dataset 7 as well as for running the experiments 8 on GitHub.", |
|
"cite_spans": [ |
|
{ |
|
"start": 725, |
|
"end": 738, |
|
"text": "(Britz, 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 822, |
|
"end": 823, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "3.1" |
|
}, |
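{

"text": "To make the pre-processing step concrete, the following minimal sketch illustrates the kind of regular-expression section splitting described above. It is not the authors' actual pipeline: the German section markers and the helper name are hypothetical, and the real code (linked above) also relies on HTML parsers and many more patterns.\n\nimport re\n\n# Hypothetical markers for the German section headings of a decision.\nSECTION_PATTERN = re.compile(\n    r'Sachverhalt:?(?P<facts>.*?)'\n    r'(?:Erw\u00e4gungen:?|zieht in Erw\u00e4gung)(?P<considerations>.*?)'\n    r'Demnach erkennt das Bundesgericht(?P<rulings>.*)',\n    re.DOTALL,\n)\n\ndef split_decision(plain_text: str) -> dict:\n    # Split a decision into facts, considerations and rulings (sketch).\n    match = SECTION_PATTERN.search(plain_text)\n    if match is None:\n        return {}\n    return {k: v.strip() for k, v in match.groupdict().items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset Construction",

"sec_num": "3.1"

},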
|
{ |
|
"text": "A typical Swiss court decision is made up of the following four main sections: rubrum, facts, considerations and rulings. 9 The rubrum (introduction) contains the date and chamber, mentions the involved judge(s) and parties and finally states the topic of the decision. The facts describe what happened in the case and form the basis for the considerations of the court. The higher the level of appeal, the more general and summarized the facts. The considerations reflect the formal legal reasoning which form the basis for the final ruling. Here the court cites laws and other influential rulings. The rulings, constituting the final section, are an enumeration of the binding decisions made by the court. This section is normally rather short and summarizes the considerations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structure of Court Decisions", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We deliberately did not consider the considerations as input to the model, unlike Aletras et al. (2016) for the following reasons.The facts are the section which is most similar to a general description of the case, which may be more widely available, while being less biased. 10 Additionally, the facts do not change that much from one to the next level of appeal (apart from being more concise and summarized in the higher levels of appeal). According to estimations from several court clerks we consulted, the facts take approximately 10% of the time for drafting a decision while the considerations take 85% and the outcome 5% (45%, 50% and 5% in penal law respectively). So, most of the work being done by the judges and clerks results in the legal considerations. Therefore, we would expect the model to perform better if it had access to the considerations. But on the other hand, the value of the model would be far smaller, since most of the work is already done, once the considerations are written. Thus, to create a more realistic and challenging scenario, we consider only the facts as input for the predictive models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 103, |
|
"text": "Aletras et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Use of Facts instead of Considerations", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "The cases have been originally labeled with 6 labels: approval, partial approval, dismissal, partial dismissal, inadmissible and write off. The first four are judged on the basis of the facts (merits) and the last two for formal reasons. A case is considered inadmissible, if there are formal deficiencies with the appeal or if the court is not responsible to rule the case. A court rules write off if the case has become redundant so there is no reason for the proceeding anymore. This can be for several reasons, such as an out-of-court settlement or procedural association (two proceedings are unified). Approval and partial approval mean that the request is deemed valid or partially valid respectively. Dismissal and partial dismissal mean that the request is denied or partially denied respectively. A partial decision is usually ruled in parallel with a decision of the opposite kind or with inadmissible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Binarized LJP Task -Verdict Labeling Simplification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In practice, court decisions may have multiple requests (questions), where each can be judged indi-vidually. Since the structure of the outcomes in the decisions is non-standard, parsing them automatically is very challenging. Therefore, we decided to focus on the main request only and discard all side (secondary) requests. Even the main request sometimes contains multiple judgments referring to different parts of the main request, with some more important than others (it is very hard to automatically detect their criticality). So, to simplify the task and make it more concise, we transform the document labeling from a list of partial judgments into a single judgment, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Binarized LJP Task -Verdict Labeling Simplification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "1. We excluded all cases that have been ruled with both an approval and a dismissal in the main request, since that could be rather confusing. 2. We excluded cases ruled with write off outcomes since these cases are rejected for formal reasons that are not written (described) in the facts. Therefore, a model has no chance of inferring it correctly. We also excluded cases with inadmissible outcomes for similar reasons. 3. Since partial approvals/dismissals are very hard to distinguish from full approvals/dismissals respectively, we converted all the partial ones to full ones. Thus, the final labeling includes two possible outcomes, approvals and dismissals (i.e., the court \"leans\" positive or negative to the request).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Binarized LJP Task -Verdict Labeling Simplification", |
|
"sec_num": "3.3" |
|
}, |
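{

"text": "The three simplification rules above can be summarized as a small filtering function. The following sketch is illustrative only; it assumes the judgments of the main request have already been extracted as a list of label strings, and all names are our own.\n\nfrom typing import Optional\n\nAPPROVAL = {'approval', 'partial approval'}\nDISMISSAL = {'dismissal', 'partial dismissal'}\nEXCLUDED = {'inadmissible', 'write off'}  # formal reasons, not inferable from the facts\n\ndef binarize(main_request_labels: list) -> Optional[str]:\n    # Map the judgments of the main request to a single binary label;\n    # return None for cases excluded from the final dataset.\n    labels = set(main_request_labels)\n    if labels & EXCLUDED:\n        return None\n    has_approval = bool(labels & APPROVAL)\n    has_dismissal = bool(labels & DISMISSAL)\n    if has_approval and has_dismissal:  # both outcomes: dropped as confusing\n        return None\n    if has_approval:\n        return 'approval'  # partial approvals folded into approvals\n    if has_dismissal:\n        return 'dismissal'  # partial dismissals folded into dismissals\n    return None",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Binarized LJP Task -Verdict Labeling Simplification",

"sec_num": "3.3"

},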
|
{ |
|
"text": "By implementing these simplifications, we made the dataset more feasible (solvable) and semantically coherent targeting the core ruling process (see Section 5). Table 2 shows the numbers of decisions after each processing step. Note that we reduced the dataset with these preprocessing steps significantly (from over 141K to close to 85K decisions) to achieve higher quality. We also made the task structurally simpler by converting it from a multilabel to a binary classification task. 11 The dataset is highly imbalanced containing more than 3 4 dismissed cases (see Table 1 for de- It is useful for a human estimation of the length and for methods building upon word embeddings (Mikolov et al., 2013; Pennington et al., 2014) . The orange histogram shows the distribution in sub-word units (generated by the SentencePiece tokenizer (Kudo and Richardson, 2018) used in BERT). It is useful e.g. for estimating the maximum sequence length of a BERT-like model. Decisions with length over 4000 tokens have been grouped in the last bin. tails). The label skewness makes the classification task quite hard and beating dummy baselines, e.g., predicting always the majority class, on microaveraged measures (e.g., Micro-F1) is challenging. In our opinion, macro-averaged measures (e.g., Macro-F1) are more suitable in this setting, since they consider both outcomes (classes); they can also better discriminate better methods. In other words, they favor models that can actually learn the task (discriminate the two classes) and they do not always predict the majority class, i.e., dismissal, regardless of the facts. Table 2 : Rulings is the number of cases where rulings could be extracted. Judgments is the number of cases where we could extract any judgment types described in Section 3.3. Binarized is the number of cases considered in the final dataset after removing decisions containing labels other than approval or dismissal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 489, |
|
"text": "11", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 703, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 704, |
|
"end": 728, |
|
"text": "Pennington et al., 2014)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 862, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 168, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 576, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1613, |
|
"end": 1620, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Binarized LJP Task -Verdict Labeling Simplification", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This Section presents statistics about the distribution of cases according to different metadata like input (text) length, legal area and origin cantons. Figure 1 shows the distribution of the document (facts of the case) length of French cases. 12 We see that there are very few decisions with more", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 248, |
|
"text": "12", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 162, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Distribution", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "12 See Figures 7 and 8 in Appendix C for the German and Italian cases, respectively. than 2K tokens in German (very similar for Italian). The French decisions are more evenly distributed, including a large portion of decisions with more than 4K tokens. For all languages, there is a considerable portion of decisions (50%+) containing more than 512 sub-word units (BERTs maximum sequence length) posing a fundamental challenge for standard BERT models. Table 3 presents the distribution of legal areas across languages. The legal areas are derived from the chambers where the decisions were heard. The website of the FSCS 13 describes in detail what kinds of cases the different chambers hear. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 22, |
|
"text": "Figures 7 and 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 460, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Curse of Long Documents", |
|
"sec_num": "3.4.1" |
|
}, |
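{

"text": "The sub-word statistics above can be reproduced with any BERT tokenizer. A minimal sketch follows; the checkpoint identifier is a public German BERT model used purely as an example and is not necessarily the one behind the reported numbers.\n\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-german-cased')\n\ndef num_subwords(facts: str) -> int:\n    # Count sub-word units without truncating to the 512-token limit.\n    return len(tokenizer(facts, add_special_tokens=True, truncation=False)['input_ids'])\n\n# Cases exceeding BERT's maximum sequence length need special handling:\n# long_cases = [f for f in all_facts if num_subwords(f) > 512]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Curse of Long Documents",

"sec_num": "3.4.1"

},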
|
{ |
|
"text": "To study robustness and fairness in terms of geographical (regional) groups, we extracted the canton of origin from the decisions. As we observe in Table 4 , most of the cantons (e.g., Z\u00fcrich, Ticino) are monolingual and the distribution of the cases across cantons is very skewed with 1-2 cantons per language covering a large portion of the total cases. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 155, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Origin Cantons", |
|
"sec_num": "3.4.3" |
|
}, |
|
{ |
|
"text": "We first experiment with three baselines. The first one is a majority baseline that selects the majority (dismissal) class always across cases. The stratified baseline predicts labels randomly, respecting the training distribution. The last baseline is a linear classifier relying on TF-IDF features for the 35K most frequent n-grams in the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "4.1" |
|
}, |
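{

"text": "A minimal sketch of the third baseline is shown below. The paper specifies only TF-IDF features over the 35K most frequent n-grams and a linear classifier; the n-gram range, the choice of logistic regression and the variable names are our assumptions.\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score\nfrom sklearn.pipeline import make_pipeline\n\n# train_texts, train_labels, test_texts, test_labels: dataset splits (placeholders).\nbaseline = make_pipeline(\n    TfidfVectorizer(ngram_range=(1, 3), max_features=35000),\n    LogisticRegression(max_iter=1000),\n)\nbaseline.fit(train_texts, train_labels)\npreds = baseline.predict(test_texts)\nprint('Macro-F1:', f1_score(test_labels, preds, average='macro'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baselines",

"sec_num": "4.1"

},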
|
{ |
|
"text": "BERT (Devlin et al., 2019) and its variants Lan et al., 2020) , inter alia, dominate NLP as state-of-the-art in many tasks . Hence, we examine an arsenal of BERT-based methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 26, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 44, |
|
"end": 61, |
|
"text": "Lan et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-based methods", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Standard BERT We experimented with monolingual BERT models for German (Chan et al., 2019) , French (Martin et al., 2020) and Italian (Parisi et al., 2020) and also the multilingual BERT of (Devlin et al., 2019) . Since the facts are often longer than 512 tokens (see Section 3 for details), there is a need to adapt the models to long textual input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 89, |
|
"text": "(Chan et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 99, |
|
"end": 120, |
|
"text": "(Martin et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 154, |
|
"text": "(Parisi et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 210, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-based methods", |
|
"sec_num": "4.2" |
|
}, |
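{

"text": "Loading such encoders is standard with the Transformers library. In the sketch below, the checkpoint identifiers are public Hugging Face models that plausibly correspond to the cited ones; whether these exact checkpoints were used is our assumption.\n\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nCHECKPOINTS = {\n    'de': 'bert-base-german-cased',           # a public German BERT\n    'fr': 'camembert-base',                   # CamemBERT\n    'multi': 'bert-base-multilingual-cased',  # multilingual BERT\n}\n\ntokenizer = AutoTokenizer.from_pretrained(CHECKPOINTS['de'])\nmodel = AutoModelForSequenceClassification.from_pretrained(\n    CHECKPOINTS['de'], num_labels=2)  # approval vs. dismissal",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-based methods",

"sec_num": "4.2"

},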
|
{ |
|
"text": "Long BERT is an extension of the standard BERT models, where we extend the maximum sequence length by introducing additional positional embeddings. In our case, the additional positional encodings have been initialized by replicating the original pre-trained 512 ones 4 times (2048 in total). While Long BERT can process the full text in the majority of the cases, its extension leads to longer processing time and higher memory requirements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-based methods", |
|
"sec_num": "4.2" |
|
}, |
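{

"text": "A sketch of the positional-embedding extension described above follows, replicating the 512 pre-trained position embeddings four times. Attribute names follow the Hugging Face BERT implementation; treat this as an approximation, not the authors' exact code.\n\nimport torch\nfrom transformers import BertForSequenceClassification\n\nMAX_LEN = 2048  # 4 x 512\n\nmodel = BertForSequenceClassification.from_pretrained(\n    'bert-base-german-cased', num_labels=2)\nold_emb = model.bert.embeddings.position_embeddings  # shape (512, hidden)\n\nnew_emb = torch.nn.Embedding(MAX_LEN, old_emb.embedding_dim)\nwith torch.no_grad():\n    # Initialize the 2048 positions by tiling the pre-trained 512 ones.\n    new_emb.weight.copy_(old_emb.weight.repeat(4, 1))\n\nmodel.bert.embeddings.position_embeddings = new_emb\nmodel.config.max_position_embeddings = MAX_LEN\n# Extend the buffer of position ids accordingly.\nmodel.bert.embeddings.register_buffer(\n    'position_ids', torch.arange(MAX_LEN).expand((1, -1)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-based methods",

"sec_num": "4.2"

},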
|
{ |
|
"text": "Hierarchical BERT, similar to the one presented in Chalkidis et al. (2019) , uses a shared standard BERT encoder processing segments up to 512 tokens to encode each segment independently. To aggregate all (in our case 4) segment encodings, we pass them through an additional Bidirectional Long Short-Term Memory (BiLSTM) encoder and concatenate the final LSTM output states to form a single document representation for classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 74, |
|
"text": "Chalkidis et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-based methods", |
|
"sec_num": "4.2" |
|
}, |
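{

"text": "A compact sketch of the hierarchical architecture follows. The shared encoder, the 512-token segments, the BiLSTM aggregation and the concatenation of the final states follow the description above; using the [CLS] vector as the segment encoding and the layer sizes are our assumptions.\n\nimport torch\nimport torch.nn as nn\nfrom transformers import BertModel\n\nclass HierarchicalBert(nn.Module):\n    # Shared BERT over segments of up to 512 tokens, BiLSTM on top.\n    def __init__(self, checkpoint, num_labels=2, lstm_size=256):\n        super().__init__()\n        self.bert = BertModel.from_pretrained(checkpoint)\n        hidden = self.bert.config.hidden_size\n        self.lstm = nn.LSTM(hidden, lstm_size, batch_first=True,\n                            bidirectional=True)\n        self.classifier = nn.Linear(2 * lstm_size, num_labels)\n\n    def forward(self, input_ids, attention_mask):\n        # input_ids: (batch, num_segments, 512); encode each segment\n        # independently with the shared encoder.\n        b, s, l = input_ids.shape\n        out = self.bert(input_ids.view(b * s, l),\n                        attention_mask=attention_mask.view(b * s, l))\n        cls = out.last_hidden_state[:, 0].view(b, s, -1)\n        _, (h_n, _) = self.lstm(cls)\n        # Concatenate the final forward and backward LSTM states.\n        doc = torch.cat([h_n[0], h_n[1]], dim=-1)\n        return self.classifier(doc)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-based methods",

"sec_num": "4.2"

},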
|
{ |
|
"text": "In this Section, we describe the conducted experiments alongside the presentation of the results and an analysis of the results of the German dataset in terms of diachronicity (judgment year), legal area, input (text) length and canton of origin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "During training, we over-sample the cases representing the minority class (approval). 14 Across BERT-based methods, we use Early Stopping on development data, an initial learning rate of 3e-5 and batch size 64 across experiments. The standard BERT models have been trained and evaluated with maximum sequence length 512 and the two variants of BERT with maximum sequence length 2048. The 2048 input length has been chosen based on a balance between memory and compute restrictions and the statistics of the length of facts (see Section 3.4.1), where we see that the vast majority of cases contains less than 2K tokens. Additionally, this gives us the possibility to investigate differences by input (text) length (see Section 5.3.2). We report both micro-and macroaveraged F1-score on the test set. Micro-F1 is averaged across samples whereas Macro-F1 is averaged across samples inside each class and then across the classes. Therefore, a test example in 65.4 \u00b1 0.2 52.6 \u00b1 0.1 a minority class has a higher weight in Macro-F1 than an example from the majority class. In classification problems with imbalanced class distributions (such as the one we examine), Macro-F1 is more realistic than Micro-F1 given that we are equally interested in both classes. Each experiment has been run with 5 different random seeds. We report the average score and standard deviation across experiments. The experiments have been performed on a single GeForce RTX 3090 GPU with mixed precision and gradient accumulation. We used the Hugging Face Transformers library (Wolf et al., 2020) and the BERT models available from https://huggingface.co/models. Table 5 shows the results across methods for all language subsets. We observe that the native BERT models outperform their multi-lingual counterpart; while not being domain-specific, these models can still better model the case facts. Given the high class imbalance, all BERT-based methods underperform in Micro-F1, being biased towards dismissal performance compared to the naive Majority baseline, while doing substantially better in Macro-F1. Hierarchical and Long BERT-based methods consistently out-perform the linear classifiers across languages (+10% in Macro-F1), while standard BERT is comparable or better than lin-ear models, although it considers only up to 512 tokens. While performance of BERT-based methods is quite comparable between the German and French subsets with 35K and 21K training samples respectively, it is far worse in the Italian subset, where there are only 3K training samples. In two out of three languages (German and French with 20K+ training samples) hierarchical BERT has borderline better performance compared to long BERT (+1.6-2.2% in Macro-F1), but in both cases the difference is very close to the error margin (standard deviation). We would like to remark that the results of Hierarchical BERT could possibly be improved considering a finer segmentation of the text into sentences or paragraphs. 15 We leave the investigation for alternative segmentation schemes for future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1549, |
|
"end": 1568, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 2973, |
|
"end": 2975, |
|
"text": "15", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1635, |
|
"end": 1642, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental SetUp", |
|
"sec_num": "5.1" |
|
}, |
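{

"text": "The gap between the two metrics on a skewed label distribution can be made concrete with a toy example (the numbers below come from this toy data, not from the paper).\n\nfrom sklearn.metrics import f1_score\n\n# 8 dismissals and 2 approvals; a majority classifier ignores the minority class.\ny_true = ['dismissal'] * 8 + ['approval'] * 2\ny_majority = ['dismissal'] * 10\n\nprint(f1_score(y_true, y_majority, average='micro'))  # 0.8 (looks strong)\nprint(f1_score(y_true, y_majority, average='macro'))  # approx. 0.44 (penalized)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental SetUp",

"sec_num": "5.1"

},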
|
{ |
|
"text": "In this section, we analyze the results in relation to specific attributes (publication year, input (text) length, legal area and canton of origin) in order to evaluate the model robustness and identify how specific aspects affect the model performance. 75.4 \u00b1 3.9 69.4 \u00b1 2.5 civil law 1574 16.5% 73.6 \u00b1 4.8 55.5 \u00b1 1.0 79.0 \u00b1 3.4 65.1 \u00b1 2.4 78.9 \u00b1 3.8 65.9 \u00b1 2.8 Table 6 : We used the German native BERT model pre-trained and evaluated on the German data. In the German test set there are no insurance law cases and only 3 cases with other legal areas. The area where models perform best is in bold and the area where they perform worst is underlined. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 370, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion -Bivariate Analysis", |
|
"sec_num": "5.3" |
|
}, |
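{

"text": "Operationally, each of the following analyses groups the test predictions by an attribute and re-computes Macro-F1 within each group. A sketch (the column names and the gold/preds arrays are placeholders):\n\nimport pandas as pd\nfrom sklearn.metrics import f1_score\n\n# One row per test case: gold label, model prediction and metadata.\ndf = pd.DataFrame({'gold': gold, 'pred': preds, 'year': years,\n                   'legal_area': areas, 'canton': cantons})\n\ndef macro_f1_by(attribute):\n    # Macro-F1 of the predictions within each value of the attribute.\n    return df.groupby(attribute).apply(\n        lambda g: f1_score(g['gold'], g['pred'], average='macro'))\n\nprint(macro_f1_by('legal_area'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Discussion -Bivariate Analysis",

"sec_num": "5.3"

},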
|
{ |
|
"text": "In Figure 2 , we present the results grouped by years in the test set (2017-2020). We cannot identify a notable fluctuation in performance across years as there is a very small decrease in performance (approx. -2% in Macro-F1); most probably because the testing time-frame is really short (4 years). Comparing the performance between the validation (2015-2016) and the test (2017-2020) set (approx. 70% vs. 68.5%), again we do not observe an exceptional fluctuation time-wise.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Diachronicity", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "In Figure 3 , we observe that model performance deteriorates as input (text) length increases, i.e., there is an absolute negative correlation between performance and input (text) length. The two variants of BERT improve results, especially in cases with 512 to 2048 tokens. Since the two variants of BERT have a maximum length of 2048 they perform similar to the standard BERT type in cases longer than 2048 tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Input (Text) Length", |
|
"sec_num": "5.3.2" |
|
}, |
|
{ |
|
"text": "In Table 6 , we observe that the models do not equally perform across legal areas. All models seem to be much more accurate in penal law cases, while the performance is much worse (approx. 30%) in public law cases. According to the experts, the jurisprudence in penal law is more united and aligned in Switzerland and outlier judgments are rarer making the task more predictable. Additionally, in the case of not enough evidence the principle of \"in dubio pro reo\" (reasonable doubt) is applied. 16 Another possible reason for the higher performance in penal law could be the increased work performed by the legal clerks in drafting the facts of the case (see Section 3.2.1), thus including more useful information relevant to the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 496, |
|
"end": 498, |
|
"text": "16", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Legal Area", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "In Figure 4 , we observe a performance disparity across cantons, although this is neither correlated with the number of cases per canton, nor with the dismissal/approval rate per canton. Thus, the disparity is either purely coincidental and has to do with the difficulty of particular cases in some cantons or there are other factors (e.g., societal, economics) worth considering in future work. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Canton of Origin", |
|
"sec_num": "5.3.4" |
|
}, |
|
|
{ |
|
"text": "We introduced a new multilingual, diachronic dataset of 85K Swiss Federal Supreme Court (FSCS) cases, including cases in German, French, and Italian. We presented results considering three alternative BERT-based methods, including methods that can process up to 2048 tokens and thus can read the entirety of the facts in most cases. We found that these methods outperform the standard BERT models and have the best results in Macro-F1, while the naive majority classifier has the best overall results in Micro-F1 due to the high class imbalance of the dataset (more than 3 4 of the cases are dismissed). Further on, we presented a bivariate analysis between performance and multiple factors (diachronicity, input (text) length, legal area, and canton of origin). The analysis showed that performance deteriorates as input (text) length increases, while the results in cases from different legal areas or cantons vary raising questions on models' robustness under different attributes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions & Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In future work, we would like to investigate the application of cross-lingual transfer learning techniques, for example the use of Adapters (Houlsby et al., 2019; Pfeiffer et al., 2020) . In this case, we could possibly improve the poor performance in the Italian subset, where approx. 3K cases exists, by training a multilingual model across all languages, thus exploiting all available resources, ignoring the traditional language barrier. In the same direction, we could also exploit and transfer knowledge from other annotated datasets that aim at the LJP task (e.g., ECtHR and SCOTUS).", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "(Houlsby et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 185, |
|
"text": "Pfeiffer et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions & Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "More in depth analysis on robustness is also an interesting future avenue. In this direction, we would like to explore distributional robust optimization (DRO) techniques (Koh et al., 2021; Wang et al., 2021 ) that aim to mitigate disparities across groups of interest, i.e., labels, cantons and/or legal areas could be both considered in this framework.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 189, |
|
"text": "(Koh et al., 2021;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 207, |
|
"text": "Wang et al., 2021", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions & Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Another interesting direction is a deeper analysis with models handling long textual input (Beltagy et al., 2020; Zaheer et al., 2020) using alternative attention schemes (window-based, dilated, etc.). Furthermore, none of the examined pre-trained models is legal-oriented, thus pre-training and evaluating such specialized models is also needed, similarly to the English Legal-BERT of Chalkidis et al. (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 113, |
|
"text": "(Beltagy et al., 2020;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 134, |
|
"text": "Zaheer et al., 2020)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 409, |
|
"text": "Chalkidis et al. (2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions & Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The scope of this work is not to produce a robot lawyer, but rather to study LJP in order to broaden the discussion and help practitioners to build assisting technology for legal professionals. We believe that this is an important application field, where research should be conducted (Tsarapatsanis and Aletras, 2021) to improve legal services and democratize law, while also highlight (inform the audience on) the various multi-aspect shortcomings seeking a responsible and ethical (fair) deployment of technology. In this direction, we provide a welldocumented public resource for three languages (German, French, and Italian) that are underrepresented in legal NLP literature. We also provide annotations for several attributes (year of publication, legal area, canton/region) and provide a bivariate analysis discussing the shortcomings to further promote new studies in terms of fairness and robustness (Wang et al., 2021) , a critical part of NLP application in law. All decisions (original material) are publicly available on the entscheidsuche.ch platform and the names of the parties have been redacted (See Figures 5 and 6 ) by the court according to its official guidelines 17 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 318, |
|
"text": "(Tsarapatsanis and Aletras, 2021)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 909, |
|
"end": 928, |
|
"text": "(Wang et al., 2021)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1118, |
|
"end": 1133, |
|
"text": "Figures 5 and 6", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ethics Statement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BERT RoBERTa standard 3.377E+11 3.398E+11 long 1.365E+12 1.374E+12 hierarchical 1.476E+12 1.477E+12 Table 7 : This table shows the total floating point operations per epoch per training example used for training each type. Each model has been trained for 2 to 4 epochs (variable because of early stopping). This table can be used to choose a suitable model with limited resources. Additionally, it can be used to measure the environmental impact. Table 7 shows the training effort required for finetuning each type. Training one of the types capable of handling long input results in 4 to 5 times more training operations compared to the standard model. This seems justifiable since the gain from the longer models in terms of F1 score is considerable. Also, the entire cost of finetuning is relatively small.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 107, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 454, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this appendix we show some examples of court decisions with their respective labels. Figure 5 shows an example of a dismissed decision and Figure 6 an example of an approved decision. Both decisions are relatively short, but still contain all sections (rubrum, facts, considerations and judgments). They are both very recent, dating from 2019 and 2017 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 96, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 150, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this appendix we show the input length distributions for the German (Figure 7) and Italian ( Figure 8 ) datasets. We observe that the average Italian decision is longer than the average German decision. Additionally, there is also a higher density in moderately long decisions (over 1000 tokens) and there are many more decisions over 4000 tokens. Apart from the availability of more training data in the German dataset, the shorter decisions may also be an important factor in the better performance we see in most models trained on the German dataset in comparison to the Italian case and to some extent the French case (see Table 5 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 81, |
|
"text": "(Figure 7)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 105, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 631, |
|
"end": 638, |
|
"text": "Table 5", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C Input Length Distribution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this appendix, we show tables belonging to plots in the main paper to show the exact numbers. Table 8 shows the results regarding the different input lengths. Table 9 shows the results regarding different years in the test set. Table 10 shows the model performance across different cantons.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 105, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 170, |
|
"text": "Table 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 240, |
|
"text": "Table 10", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D Tables to Plots", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this appendix we show the results of training the models with class weights instead of oversampling. Table 11 shows the training results. We notice, that for many configurations (especially with XLM-R), the model only learns the majority classifier. This leads to a very low Macro-F1 score. We also experimented with undersampling as an alternative to oversampling, but saw similar results to the training with class weights.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 112, |
|
"text": "Table 11", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E Training with Class Weights", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this appendix, we discuss the reliability of the confidence scores of the classifier output alongside the predictions. The confidence scores are computed by taking the softmax on the classifier outputs, so that we get a probability (confidence) score of a given class between 0 and 100. The hierarchical and long BERT types show an increase in both the confidence in the correct predictions and the incorrect predictions compared to the standard BERT type (with the increase in the correct predictions being more pronounced). This finding holds across all three languages. Figure 7: This histogram shows the distribution of the input length for German decisions. The blue histogram is generated from tokens generated by the spacy tokenizer (regular words). The orange histogram is generated from tokens generated by the SentencePiece tokenizer used in BERT (subword units). Decisions with length over 4000 tokens are grouped in the last bin (before 4000).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "F Classifier Confidence", |
|
"sec_num": null |
|
}, |
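{

"text": "A sketch of how such confidence scores can be obtained from the classifier logits (the toy logits are illustrative):\n\nimport torch\n\ndef confidence(logits):\n    # Softmax probability of the predicted class, scaled to 0-100.\n    probs = torch.softmax(logits, dim=-1)\n    return 100.0 * probs.max(dim=-1).values\n\nlogits = torch.tensor([[1.2, -0.3]])  # toy classifier output for one case\nprint(confidence(logits))             # approx. tensor([81.76])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "F Classifier Confidence",

"sec_num": null

},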
|
{ |
|
"text": "Figure 8: This histogram shows the distribution of the input length for Italian decisions. The blue histogram is generated from tokens generated by the spacy tokenizer (regular words). The orange histogram is generated from tokens generated by the SentencePiece tokenizer used in BERT (subword units). Decisions with length over 4000 tokens are grouped in the last bin (before 4000).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "F Classifier Confidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tinyurl.com/mjy2uf9a 2 https://tinyurl.com/2uttucmn 3 https://tinyurl.com/4ybhhff8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note however, that the facts are drafted together with the considerations and are often formulated in a way to support the reasoning in the considerations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Although, we look forward to recover at least part of the complexity in the future, if we have the appropriate resources to manually extract per-request judgments, introducing a new multi-task (multi-question) LJP dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tinyurl.com/52a4x8yz (in German)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In preliminary experiments, we find that this sampling methodology outperforms both the standard Empirical Risk Minimization (ERM) and the class-wise weighting of the loss penalty, i.e., considering each class loss 50-50.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Currently, we segment the text into chunks of 512 tokens to avoid excessive padding that will further increase the needed number of segments and will lead to even higher time and memory demands.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The principle of \"in dubio pro reo\", i.e., \"When in doubt, in favor of the defendant.\", is only applicable in penal law cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tinyurl.com/mtu23szy (In German)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work has been supported by the Swiss National Research Programme \"Digital Transformation\" (NRP-77) 18 grant number 187477. This work is also partly funded by the Innovation Fund Denmark (IFD) 19 under File No. 0175-00011A. We would like to thank: Daniel Kettiger, Magda Chodup, and Thomas L\u00fcthi for their legal advice, Adrian J\u00f6rg for help in coding, and Entscheidsuche.ch for providing the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "74.5 \u00b1 4.0 64.8 \u00b1 1.9 76.0 \u00b1 3.7 67.5 \u00b1 1.7 76.9 \u00b1 3.8 68.3 \u00b1 1.6 2020 73.5 \u00b1 4.2 62.4 \u00b1 1.6 76.6 \u00b1 3.4 67.8 \u00b1 1.8 77.4 \u00b1 3.1 68.5 \u00b1 1.5 76.1 \u00b1 7.4 48.4 \u00b1 4.9 80.4 \u00b1 1.9 44.7 \u00b1 0.4 long Native BERT 81.9 \u00b1 1.2 69.5 \u00b1 0.9 81.8 \u00b1 1.5 69.4 \u00b1 1.7 80.2 \u00b1 1.4 46.1 \u00b1 2.2 XLM-RoBERTa 81.5 \u00b1 0.7 59.4 \u00b1 9.6 81.5 \u00b1 0.5 51. 85.9 \u00b1 15.2 70.8 \u00b1 13.9 88.7 \u00b1 14.7 71.4 \u00b1 13.4 Table 12 : This table shows the average confidence scores (0-100) of the different types of multilingual BERT models on the test set for correct and incorrect predictions respectively. Both the mean and standard deviation are averaged over 5 random seeds. The model has been finetuned on the entire dataset (all languages) and evaluated on the respective language.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 369, |
|
"text": "Table 12", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Predicting judicial decisions of the European Court of Human Rights: a Natural Language Processing perspective", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Aletras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Tsarapatsanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Preo\u0163iuc-Pietro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasileios", |
|
"middle": [], |
|
"last": "Lampos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "PeerJ Computer Science", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.7717/peerj-cs.93" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Aletras, Dimitrios Tsarapatsanis, Daniel Preo\u0163iuc-Pietro, and Vasileios Lampos. 2016. Pre- dicting judicial decisions of the European Court of Human Rights: a Natural Language Processing per- spective. PeerJ Computer Science, 2:e93. Publisher: PeerJ Inc.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Machine bias: There's software used across the country to predict future criminals. and it's biased against blacks", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Angwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Larson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Surya", |
|
"middle": [], |
|
"last": "Mattu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lauren", |
|
"middle": [], |
|
"last": "Kirchner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Angwin, Jeff Larson, Surya Mattu, and Lauren Kirchner. 2016. Machine bias: There's software used across the country to predict future criminals. and it's biased against blacks. ProPublica.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Longformer: The Long-Document Transformer", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.05150[cs].ArXiv:2004.05150" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Matthew E. Peters, and Arman Cohan. 2020. Longformer: The Long-Document Transformer. arXiv:2004.05150 [cs]. ArXiv: 2004.05150.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Random forests. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Breiman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "5--32", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1023/A:1010933404324" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leo Breiman. 2001. Random forests. Machine Learn- ing, 45(1):5-32.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "2020. AI Research, Replicability and Incentives", |
|
"authors": [ |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Britz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Denny Britz. 2020. AI Research, Replicability and In- centives.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Neural Legal Judgment Prediction in English", |
|
"authors": [], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4317--4323", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1424" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Ion Androutsopoulos, and Nikolaos Aletras. 2019. Neural Legal Judgment Prediction in English. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 4317-4323, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manos", |
|
"middle": [], |
|
"last": "Fergadiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2898--2904", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.261" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Manos Fergadiotis, Prodromos Malaka- siotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. LEGAL-BERT: The muppets straight out of law school. In Findings of the Association for Com- putational Linguistics: EMNLP 2020, pages 2898- 2904, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Paragraph-level rationale extraction through regularization: A case study on european court of human rights cases", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Manos Fergadiotis, Dimitrios Tsarapatsanis, Nikolaos Aletras, Ion Androutsopoulos, and Prodromos Malakasiotis. 2021a", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Manos Fergadiotis, Dimitrios Tsarapat- sanis, Nikolaos Aletras, Ion Androutsopoulos, and Prodromos Malakasiotis. 2021a. Paragraph-level rationale extraction through regularization: A case study on european court of human rights cases. In Proceedings of the Annual Conference of the North American Chapter of the Association for Computa- tional Linguistics, online. 18 https://www.nfp77.ch/en/ 19 https://innovationsfonden.dk/en", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Ion Androutsopoulos, Daniel Martin Katz, and Nikolaos Aletras. 2021b. LexGLUE: A Benchmark Dataset for Legal Language Understanding in", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhik", |
|
"middle": [], |
|
"last": "Jana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hartung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bommarito", |
|
"suffix": "" |
|
},
|
{
|
"first": "Ion",
|
"middle": [],
|
"last": "Androutsopoulos",
|
"suffix": ""
|
},
|
{
|
"first": "Daniel Martin",
|
"middle": [],
|
"last": "Katz",
|
"suffix": ""
|
},
|
{
|
"first": "Nikolaos",
|
"middle": [],
|
"last": "Aletras",
|
"suffix": ""
|
}
|
],
|
"year": 2021,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis, Abhik Jana, Dirk Hartung, Michael Bommarito, Ion Androutsopoulos, Daniel Martin Katz, and Nikolaos Aletras. 2021b. LexGLUE: A Benchmark Dataset for Legal Language Understand- ing in English.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deep learning in law: early adaptation and legal word embeddings trained on large corpora", |
|
"authors": [ |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chalkidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Kampas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Artificial Intelligence and Law", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "171--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilias Chalkidis and Dimitrios Kampas. 2018. Deep learning in law: early adaptation and legal word em- beddings trained on large corpora. Artificial Intelli- gence and Law, 27:171-198.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Tanay Soni, and Chin Man Yeung. 2019. deepset -Open Sourcing German BERT", |
|
"authors": [ |
|
{ |
|
"first": "Branden", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timo", |
|
"middle": [], |
|
"last": "M\u00f6ller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malte", |
|
"middle": [], |
|
"last": "Pietsch", |
|
"suffix": "" |
|
},
|
{
|
"first": "Tanay",
|
"middle": [],
|
"last": "Soni",
|
"suffix": ""
|
},
|
{
|
"first": "Chin Man",
|
"middle": [],
|
"last": "Yeung",
|
"suffix": ""
|
}
|
],
|
"year": 2019,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Branden Chan, Timo M\u00f6ller, Malte Pietsch, Tanay Soni, and Chin Man Yeung. 2019. deepset -Open Sourcing German BERT.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Support vector networks", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Machine Learning", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Cortes and V. Vapnik. 1995. Support vector net- works. Machine Learning, 20:273-297.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805[cs].ArXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv:1810.04805 [cs]. ArXiv: 1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "The accuracy, fairness, and limits of predicting recidivism", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Dressel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hany", |
|
"middle": [], |
|
"last": "Farid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Science Advances", |
|
"volume": "", |
|
"issue": "10", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Dressel and Hany Farid. 2018. The accuracy, fair- ness, and limits of predicting recidivism. Science Advances, 4(10).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A decisiontheoretic generalization of on-line learning and an application to boosting", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Freund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Journal of Computer and System Sciences", |
|
"volume": "55", |
|
"issue": "1", |
|
"pages": "119--139", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1006/jcss.1997.1504" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Freund and Robert E Schapire. 1997. A decision- theoretic generalization of on-line learning and an application to boosting. Journal of Computer and System Sciences, 55(1):119-139.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "spaCy: Industrial-strength Natural Language Processing in Python", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5281/zenodo.1212303" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal, Ines Montani, Sofie Van Lan- deghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Parameter-efficient transfer learning for nlp", |
|
"authors": [ |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Houlsby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Giurgiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stanislaw", |
|
"middle": [], |
|
"last": "Jastrzebski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bruna", |
|
"middle": [], |
|
"last": "Morrone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "De Laroussilhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Gesmundo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Attariyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gelly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 36th International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin de Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for nlp. In Proceedings of the 36th International Conference on Machine Learning (ICML), Long Beach, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Few-Shot Charge Prediction with Discriminative Legal Attributes", |
|
"authors": [ |
|
{ |
|
"first": "Zikun", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "487--498", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zikun Hu, Xiang Li, Cunchao Tu, Zhiyuan Liu, and Maosong Sun. 2018. Few-Shot Charge Prediction with Discriminative Legal Attributes. In Proceed- ings of the 27th International Conference on Compu- tational Linguistics, pages 487-498, Santa Fe, New Mexico, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Quantitative legal prediction-or-how I learned to stop worrying and start preparing for the data-driven future of the legal services industry", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Daniel Martin Katz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Emory Law Journal", |
|
"volume": "62", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Martin Katz. 2012. Quantitative legal prediction-or-how I learned to stop worrying and start preparing for the data-driven future of the legal services industry. Emory Law Journal, 62:909.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A general approach for predicting the behavior of the Supreme Court of the United States", |
|
"authors": [ |
|
{
|
"first": "Daniel Martin",
|
"middle": [],
|
"last": "Katz",
|
"suffix": ""
|
},
|
{
|
"first": "Michael",
|
"middle": [
|
"J"
|
],
|
"last": "Bommarito",
|
"suffix": "II"
|
},
|
{
|
"first": "Josh",
|
"middle": [],
|
"last": "Blackman",
|
"suffix": ""
|
}
|
], |
|
"year": 2017, |
|
"venue": "PLOS ONE", |
|
"volume": "12", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0174698" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Martin Katz, Michael J. Bommarito Ii, and Josh Blackman. 2017. A general approach for predict- ing the behavior of the Supreme Court of the United States. PLOS ONE, 12(4):e0174698. Publisher: Public Library of Science.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Improving supreme court forecasting using boosted decision trees", |
|
"authors": [ |
|
{ |
|
"first": "Aaron", |
|
"middle": [ |
|
"Russell" |
|
], |
|
"last": "Kaufman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Kraft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maya", |
|
"middle": [], |
|
"last": "Sen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Political Analysis", |
|
"volume": "27", |
|
"issue": "3", |
|
"pages": "381--387", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1017/pan.2018.59" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aaron Russell Kaufman, Peter Kraft, and Maya Sen. 2019. Improving supreme court forecasting us- ing boosted decision trees. Political Analysis, 27(3):381-387.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "WILDS: A benchmark of in-the-wild distribution shifts", |
|
"authors": [ |
|
{ |
|
"first": "Pang", |
|
"middle": [], |
|
"last": "Wei Koh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiori", |
|
"middle": [], |
|
"last": "Sagawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henrik", |
|
"middle": [], |
|
"last": "Marklund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sang", |
|
"middle": [ |
|
"Michael" |
|
], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marvin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akshay", |
|
"middle": [], |
|
"last": "Balsubramani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weihua", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"Lanas" |
|
], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irena", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Stavness", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
},
|
{
|
"first": "Berton",
|
"middle": [
|
"A"
|
],
|
"last": "Earnshaw",
|
"suffix": ""
|
},
|
{
|
"first": "Imran",
|
"middle": [
|
"S"
|
],
|
"last": "Haque",
|
"suffix": ""
|
},
|
{
|
"first": "Sara",
|
"middle": [],
|
"last": "Beery",
|
"suffix": ""
|
},
|
{
|
"first": "Jure",
|
"middle": [],
|
"last": "Leskovec",
|
"suffix": ""
|
},
|
{
|
"first": "Anshul",
|
"middle": [],
|
"last": "Kundaje",
|
"suffix": ""
|
},
|
{
|
"first": "Emma",
|
"middle": [],
|
"last": "Pierson",
|
"suffix": ""
|
},
|
{
|
"first": "Sergey",
|
"middle": [],
|
"last": "Levine",
|
"suffix": ""
|
},
|
{
|
"first": "Chelsea",
|
"middle": [],
|
"last": "Finn",
|
"suffix": ""
|
},
|
{
|
"first": "Percy",
|
"middle": [],
|
"last": "Liang",
|
"suffix": ""
|
}
|
],
|
"year": 2021, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pang Wei Koh, Shiori Sagawa, Henrik Mark- lund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, Tony Lee, Eti- enne David, Ian Stavness, Wei Guo, Berton A. Earn- shaw, Imran S. Haque, Sara Beery, Jure Leskovec, Anshul Kundaje, Emma Pierson, Sergey Levine, Chelsea Finn, and Percy Liang. 2021. WILDS: A benchmark of in-the-wild distribution shifts. In International Conference on Machine Learning (ICML).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.06226[cs].ArXiv:1808.06226" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for Neural Text Processing. arXiv:1808.06226 [cs]. ArXiv: 1808.06226.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Piyush Sharma, and Radu Soricut. 2020. ALBERT: A Lite BERT for Selfsupervised Learning of Language Representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
},
|
{
|
"first": "Piyush",
|
"middle": [],
|
"last": "Sharma",
|
"suffix": ""
|
},
|
{
|
"first": "Radu",
|
"middle": [],
|
"last": "Soricut",
|
"suffix": ""
|
}
|
],
|
"year": 2020,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.11942[cs].ArXiv:1909.11942" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Sori- cut. 2020. ALBERT: A Lite BERT for Self- supervised Learning of Language Representations. arXiv:1909.11942 [cs]. ArXiv: 1909.11942.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "MANN: A Multichannel Attentive Neural Network for Legal Judgment Prediction", |
|
"authors": [ |
|
{ |
|
"first": "Shang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongli", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
},
|
{
|
"first": "Xiaoding",
|
"middle": [],
|
"last": "Guo",
|
"suffix": ""
|
},
|
{
|
"first": "Binxing",
|
"middle": [],
|
"last": "Fang",
|
"suffix": ""
|
}
|
],
|
"year": 2019,
|
"venue": "IEEE Access",
|
"volume": "7", |
|
"issue": "", |
|
"pages": "151144--151155", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ACCESS.2019.2945771" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shang Li, Hongli Zhang, Lin Ye, Xiaoding Guo, and Binxing Fang. 2019. MANN: A Multichannel At- tentive Neural Network for Legal Judgment Predic- tion. IEEE Access, 7:151144-151155. Conference Name: IEEE Access.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692[cs].ArXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretrain- ing Approach. arXiv:1907.11692 [cs]. ArXiv: 1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Automatic Judgment Prediction via Legal Reading Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Shangbang", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Chinese Computational Linguistics, Lecture Notes in Computer Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "558--572", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-32381-3_45" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shangbang Long, Cunchao Tu, Zhiyuan Liu, and Maosong Sun. 2019. Automatic Judgment Predic- tion via Legal Reading Comprehension. In Chinese Computational Linguistics, Lecture Notes in Com- puter Science, pages 558-572, Cham. Springer In- ternational Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning to Predict Charges for Criminal Cases with Legal Basis", |
|
"authors": [ |
|
{ |
|
"first": "Bingfeng", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianbo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2727--2736", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1289" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bingfeng Luo, Yansong Feng, Jianbo Xu, Xiang Zhang, and Dongyan Zhao. 2017. Learning to Predict Charges for Criminal Cases with Legal Basis. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2727-2736, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "\u00c9ric Villemonte De La Clergerie", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro Javier Ortiz", |
|
"middle": [], |
|
"last": "Su\u00e1rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoann", |
|
"middle": [], |
|
"last": "Dupont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Romary", |
|
"suffix": "" |
|
},
|
{
|
"first": "\u00c9ric",
|
"middle": [],
|
"last": "Villemonte De La Clergerie",
|
"suffix": ""
|
},
|
{
|
"first": "Djam\u00e9",
|
"middle": [],
|
"last": "Seddah",
|
"suffix": ""
|
},
|
{
|
"first": "Beno\u00eet",
|
"middle": [],
|
"last": "Sagot",
|
"suffix": ""
|
}
|
],
|
"year": 2020,
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
|
"volume": "", |
|
"issue": "", |
|
"pages": "7203--7219", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.645" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, Benjamin Muller, Pedro Javier Ortiz Su\u00e1rez, Yoann Dupont, Laurent Romary, \u00c9ric Ville- monte De La Clergerie, Djam\u00e9 Seddah, and Beno\u00eet Sagot. 2020. CamemBERT: a Tasty French Lan- guage Model. pages 7203-7219.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Judicial decisions of the European Court of Human Rights: Looking into the crystal ball", |
|
"authors": [ |
|
{ |
|
"first": "Masha", |
|
"middle": [], |
|
"last": "Medvedeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Vols", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martijn", |
|
"middle": [], |
|
"last": "Wieling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference on Empirical Legal Studies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masha Medvedeva, Michel Vols, and Martijn Wieling. 2018. Judicial decisions of the European Court of Human Rights: Looking into the crystal ball. In Proceedings of the Conference on Empirical Legal Studies, page 24.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Efficient Estimation of Word Representations in Vector Space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient Estimation of Word Repre- sentations in Vector Space. arXiv:1301.3781 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "UmBERTo: an Italian Language Model trained with Whole Word Masking", |
|
"authors": [ |
|
{ |
|
"first": "Loreto", |
|
"middle": [], |
|
"last": "Parisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [], |
|
"last": "Francia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Magnani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Original", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Loreto Parisi, Simone Francia, and Paolo Magnani. 2020. UmBERTo: an Italian Language Model trained with Whole Word Masking. Original-date: 2020-01-10T09:55:31Z.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Glove: Global Vectors for Word Representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global Vectors for Word Representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer", |
|
"authors": [ |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Pfeiffer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7654--7673", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.617" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonas Pfeiffer, Ivan Vuli\u0107, Iryna Gurevych, and Se- bastian Ruder. 2020. MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7654-7673, Online.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Changing the World by Changing the Data", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [ |
|
"Rogers" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2182--2194", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.170" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Rogers. 2021. Changing the World by Changing the Data. In Proceedings of the 59th Annual Meet- ing of the Association for Computational Linguistics and the 11th International Joint Conference on Nat- ural Language Processing (Volume 1: Long Papers), pages 2182-2194, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "We need to talk about random splits", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ebert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasmijn", |
|
"middle": [], |
|
"last": "Bastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders S\u00f8gaard, Sebastian Ebert, Jasmijn Bastings, and Katja Filippova. 2021. We need to talk about random splits. In Proceedings of the 2021 Confer- ence of the European Chapter of the Association for Computational Linguistics (EACL), Online.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "On the ethical limits of natural language processing on legal text", |
|
"authors": [ |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Tsarapatsanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Aletras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3590--3599", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.findings-acl.314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitrios Tsarapatsanis and Nikolaos Aletras. 2021. On the ethical limits of natural language processing on legal text. In Findings of the Association for Com- putational Linguistics: ACL-IJCNLP 2021, pages 3590-3599, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Design and Implementation of German Legal Decision Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Stefanie", |
|
"middle": [], |
|
"last": "Urchs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jelena", |
|
"middle": [], |
|
"last": "Mitrovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Granitzer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 13th International Conference on Agents and Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "515--521", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5220/0010187305150521" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefanie Urchs, Jelena Mitrovi\u0107, and Michael Granitzer. 2021. Design and Implementation of German Legal Decision Corpora:. In Proceedings of the 13th Inter- national Conference on Agents and Artificial Intelli- gence, pages 515-521, Online Streaming, -Select a Country -. SCITEPRESS -Science and Technol- ogy Publications.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2019. SuperGLUE: A Stickier Benchmark for General-Purpose Lan- guage Understanding Systems. page 30.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--355", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5446" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A Multi-Task Benchmark and Analysis Plat- form for Natural Language Understanding. In Proceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Historical Analysis of Legal Opinions with a Sparse Mixed-Effects Latent Variable Model", |
|
"authors": [ |
|
{
|
"first": "William",
|
"middle": [
|
"Yang"
|
],
|
"last": "Wang",
|
"suffix": ""
|
},
|
{
|
"first": "Elijah",
|
"middle": [],
|
"last": "Mayfield",
|
"suffix": ""
|
},
|
{
|
"first": "Suresh",
|
"middle": [],
|
"last": "Naidu",
|
"suffix": ""
|
},
|
{
|
"first": "Jeremiah",
|
"middle": [],
|
"last": "Dittmar",
|
"suffix": ""
|
}
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "740--749", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Yang Wang, Elijah Mayfield, Suresh Naidu, and Jeremiah Dittmar. 2012. Historical Analysis of Legal Opinions with a Sparse Mixed-Effects Latent Variable Model. In Proceedings of the 50th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 740-749, Jeju Island, Korea. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Equality before the law: Legal judgment consistency analysis for fairness", |
|
"authors": [ |
|
{ |
|
"first": "Yuzhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shirong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoxi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Science China -Information Sciences", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuzhong Wang, Chaojun Xiao, Shirong Ma, Haoxi Zhong, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2021. Equality before the law: Legal judgment consistency analysis for fairness. Science China -Information Sciences, abs/2103.13868.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Transformers: State-of-the-Art Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{
|
"first": "Patrick",
|
"middle": [],
|
"last": "von Platen",
|
"suffix": ""
|
},
|
{
|
"first": "Clara",
|
"middle": [],
|
"last": "Ma",
|
"suffix": ""
|
},
|
{
|
"first": "Yacine",
|
"middle": [],
|
"last": "Jernite",
|
"suffix": ""
|
},
|
{
|
"first": "Julien",
|
"middle": [],
|
"last": "Plu",
|
"suffix": ""
|
},
|
{
|
"first": "Canwen",
|
"middle": [],
|
"last": "Xu",
|
"suffix": ""
|
},
|
{
|
"first": "Teven",
|
"middle": [
|
"Le"
|
],
|
"last": "Scao",
|
"suffix": ""
|
},
|
{
|
"first": "Sylvain",
|
"middle": [],
|
"last": "Gugger",
|
"suffix": ""
|
},
|
{
|
"first": "Mariama",
|
"middle": [],
|
"last": "Drame",
|
"suffix": ""
|
},
|
{
|
"first": "Quentin",
|
"middle": [],
|
"last": "Lhoest",
|
"suffix": ""
|
},
|
{
|
"first": "Alexander",
|
"middle": [],
|
"last": "Rush",
|
"suffix": ""
|
}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-demos.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-Art Natural Language Process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "CAIL2018: A Large-Scale Legal Dataset for Judgment Prediction", |
|
"authors": [ |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoxi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhipeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xianpei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1807.02478" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chaojun Xiao, Haoxi Zhong, Zhipeng Guo, Cunchao Tu, Zhiyuan Liu, Maosong Sun, Yansong Feng, Xi- anpei Han, Zhen Hu, Heng Wang, and Jianfeng Xu. 2018. CAIL2018: A Large-Scale Legal Dataset for Judgment Prediction. arXiv:1807.02478 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "A Recurrent Attention Network for Judgment Prediction", |
|
"authors": [ |
|
{ |
|
"first": "Ze", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengfei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linjun", |
|
"middle": [], |
|
"last": "Shou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Artificial Neural Networks and Machine Learning -ICANN 2019: Text and Time Series", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "253--266", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-30490-4_21" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ze Yang, Pengfei Wang, Lei Zhang, Linjun Shou, and Wenwen Xu. 2019. A Recurrent Attention Network for Judgment Prediction. In Artificial Neural Net- works and Machine Learning -ICANN 2019: Text and Time Series, Lecture Notes in Computer Sci- ence, pages 253-266, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{
|
"first": "Quoc",
|
"middle": [
|
"V"
|
],
|
"last": "Le",
|
"suffix": ""
|
}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2020. XLNet: Generalized Autoregressive Pretraining for Language Understanding. arXiv:1906.08237 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1174" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical attention networks for document classification. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1480-1489, San Diego, California. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Interpretable Charge Predictions for Criminal Cases: Learning to Generate Court Views from Fact Descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhunchen", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenhan", |
|
"middle": [], |
|
"last": "Chao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1854--1864", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1168" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Ye, Xin Jiang, Zhunchen Luo, and Wenhan Chao. 2018. Interpretable Charge Predictions for Crimi- nal Cases: Learning to Generate Court Views from Fact Descriptions. In Proceedings of the 2018 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 1854-1864, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Big bird: Transformers for longer sequences", |
|
"authors": [ |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guru", |
|
"middle": [], |
|
"last": "Guruganesh", |
|
"suffix": "" |
|
}, |
|
{
|
"first": "Kumar",
|
"middle": [
|
"Avinava"
|
],
|
"last": "Dubey",
|
"suffix": ""
|
},
|
{
|
"first": "Joshua",
|
"middle": [],
|
"last": "Ainslie",
|
"suffix": ""
|
},
|
{
|
"first": "Chris",
|
"middle": [],
|
"last": "Alberti",
|
"suffix": ""
|
},
|
{
|
"first": "Santiago",
|
"middle": [],
|
"last": "Ontanon",
|
"suffix": ""
|
},
|
{
|
"first": "Philip",
|
"middle": [],
|
"last": "Pham",
|
"suffix": ""
|
},
|
{
|
"first": "Anirudh",
|
"middle": [],
|
"last": "Ravula",
|
"suffix": ""
|
},
|
{
|
"first": "Qifan",
|
"middle": [],
|
"last": "Wang",
|
"suffix": ""
|
},
|
{
|
"first": "Li",
|
"middle": [],
|
"last": "Yang",
|
"suffix": ""
|
}
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago On- tanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. 2020. Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems, 33.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Legal Judgment Prediction via Topological Learning", |
|
"authors": [ |
|
{ |
|
"first": "Haoxi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhipeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3540--3549", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1390" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoxi Zhong, Zhipeng Guo, Cunchao Tu, Chaojun Xiao, Zhiyuan Liu, and Maosong Sun. 2018. Le- gal Judgment Prediction via Topological Learning. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 3540-3549, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "How does NLP benefit legal system: A summary of legal artificial intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Haoxi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaojun", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cunchao", |
|
"middle": [], |
|
"last": "Tu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5218--5230", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.466" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoxi Zhong, Chaojun Xiao, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2020. How does NLP benefit legal system: A summary of legal artificial intelligence. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 5218-5230, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Predicting the Law Area and Decisions of French Supreme Court Cases", |
|
"authors": [ |
|
{
|
"first": "Octavia-Maria",
|
"middle": [],
|
"last": "\u015eulea",
|
"suffix": ""
|
},
|
{
|
"first": "Marcos",
|
"middle": [],
|
"last": "Zampieri",
|
"suffix": ""
|
},
|
{
|
"first": "Mihaela",
|
"middle": [],
|
"last": "Vela",
|
"suffix": ""
|
},
|
{
|
"first": "Josef",
|
"middle": [],
|
"last": "van Genabith",
|
"suffix": ""
|
}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "716--722", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.26615/978-954-452-049-6_092" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Octavia-Maria\u015eulea, Marcos Zampieri, Mihaela Vela, and Josef van Genabith. 2017. Predicting the Law Area and Decisions of French Supreme Court Cases. In Proceedings of the International Confer- ence Recent Advances in Natural Language Process- ing, RANLP 2017, pages 716-722, Varna, Bulgaria. INCOMA Ltd.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "The distribution of the document (the facts of a case) length for French decisions. The blue histogram shows the document (case) length distribution in regular words (using the spacy tokenizer (Honnibal et al., 2020)).", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "This table compares the different BERT types on cases from different years. We used the native German BERT model.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "This table compares the different long BERT types on different input (text) lengths. We used the native German BERT model.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "This table compares the different long BERT types on different origin cantons. We used the native French BERT model. The cantons are sorted by the number of cases in the training set descending.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"text": "This is an example of a dismissed decision:https://tinyurl.com/n44hathc", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"type_str": "figure", |
|
"text": "This is an example of an approved decision:https://tinyurl.com/mjxfjn65", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "The number of cases per label (approval, dismissal) in each language subset.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "The distribution of legal areas in each language subset.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"text": "", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"text": "All the models have been trained and evaluated in the same language. With Native BERT we mean the BERT model pre-trained in the respective language. The best scores for each language are in bold. Given the high class imbalance, BERT-based methods under-perform in Micro-F1 compared to the Majority baseline, while being substantially better in Macro-F1.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |