SaylorTwift HF Staff commited on
Commit
fa45937
·
verified ·
1 Parent(s): 7c31aac

Delete loading script

Browse files
Files changed (1) hide show
  1. lexglue.py +0 -572
lexglue.py DELETED
@@ -1,572 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English."""
16
-
17
- import csv
18
- import json
19
- import textwrap
20
-
21
- import datasets
22
- import os
23
-
24
# BibTeX citation for the LexGLUE benchmark paper itself; it is appended to
# each task-specific citation when building DatasetInfo (see LexGLUE._info).
# NOTE(review): internal whitespace of this literal was reconstructed from a
# diff view — confirm against the original file if byte-exactness matters.
MAIN_CITATION = """\
@article{chalkidis-etal-2021-lexglue,
title={{LexGLUE}: A Benchmark Dataset for Legal Language Understanding in English},
author={Chalkidis, Ilias and
Jana, Abhik and
Hartung, Dirk and
Bommarito, Michael and
Androutsopoulos, Ion and
Katz, Daniel Martin and
Aletras, Nikolaos},
year={2021},
eprint={2110.00976},
archivePrefix={arXiv},
primaryClass={cs.CL},
note = {arXiv: 2110.00976},
}"""

# Benchmark-level description; individual configs carry their own
# task-specific descriptions.
_DESCRIPTION = """\
Legal General Language Understanding Evaluation (LexGLUE) benchmark is
a collection of datasets for evaluating model performance across a diverse set of legal NLU tasks
"""
45
-
46
# ECHR article identifiers used as labels by the ecthr_a / ecthr_b tasks
# ("P1-1" is Protocol 1, Article 1).
ECTHR_ARTICLES = ["2", "3", "5", "6", "8", "9", "10", "11", "14", "P1-1"]

# EuroVoc thesaurus concept identifiers used as labels by the eurlex task.
# Note the sequence is not contiguous: several ids in the 100163-100285
# range are deliberately absent.
EUROVOC_CONCEPTS = [
    "100163", "100168", "100169", "100170", "100171", "100172", "100173", "100174",
    "100175", "100176", "100177", "100179", "100180", "100183", "100184", "100185",
    "100186", "100187", "100189", "100190", "100191", "100192", "100193", "100194",
    "100195", "100196", "100197", "100198", "100199", "100200", "100201", "100202",
    "100204", "100205", "100206", "100207", "100212", "100214", "100215", "100220",
    "100221", "100222", "100223", "100224", "100226", "100227", "100229", "100230",
    "100231", "100232", "100233", "100234", "100235", "100237", "100238", "100239",
    "100240", "100241", "100242", "100243", "100244", "100245", "100246", "100247",
    "100248", "100249", "100250", "100252", "100253", "100254", "100255", "100256",
    "100257", "100258", "100259", "100260", "100261", "100262", "100263", "100264",
    "100265", "100266", "100268", "100269", "100270", "100271", "100272", "100273",
    "100274", "100275", "100276", "100277", "100278", "100279", "100280", "100281",
    "100282", "100283", "100284", "100285",
]
150
-
151
# Contract-provision topic labels for the ledgar task (100 classes,
# alphabetically ordered).
LEDGAR_CATEGORIES = [
    "Adjustments", "Agreements", "Amendments", "Anti-Corruption Laws",
    "Applicable Laws", "Approvals", "Arbitration", "Assignments",
    "Assigns", "Authority", "Authorizations", "Base Salary",
    "Benefits", "Binding Effects", "Books", "Brokers",
    "Capitalization", "Change In Control", "Closings", "Compliance With Laws",
    "Confidentiality", "Consent To Jurisdiction", "Consents", "Construction",
    "Cooperation", "Costs", "Counterparts", "Death",
    "Defined Terms", "Definitions", "Disability", "Disclosures",
    "Duties", "Effective Dates", "Effectiveness", "Employment",
    "Enforceability", "Enforcements", "Entire Agreements", "Erisa",
    "Existence", "Expenses", "Fees", "Financial Statements",
    "Forfeitures", "Further Assurances", "General", "Governing Laws",
    "Headings", "Indemnifications", "Indemnity", "Insurances",
    "Integration", "Intellectual Property", "Interests", "Interpretations",
    "Jurisdictions", "Liens", "Litigations", "Miscellaneous",
    "Modifications", "No Conflicts", "No Defaults", "No Waivers",
    "Non-Disparagement", "Notices", "Organizations", "Participations",
    "Payments", "Positions", "Powers", "Publicity",
    "Qualifications", "Records", "Releases", "Remedies",
    "Representations", "Sales", "Sanctions", "Severability",
    "Solvency", "Specific Performance", "Submission To Jurisdiction", "Subsidiaries",
    "Successors", "Survival", "Tax Withholdings", "Taxes",
    "Terminations", "Terms", "Titles", "Transactions With Affiliates",
    "Use Of Proceeds", "Vacations", "Venues", "Vesting",
    "Waiver Of Jury Trials", "Waivers", "Warranties", "Withholdings",
]
253
-
254
# Supreme Court Database issue-area codes used as labels by the scotus task.
SCDB_ISSUE_AREAS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"]

# The eight unfair-clause categories of the unfair_tos task.
UNFAIR_CATEGORIES = [
    "Limitation of liability",
    "Unilateral termination",
    "Unilateral change",
    "Content removal",
    "Contract by using",
    "Choice of law",
    "Jurisdiction",
    "Arbitration",
]

# Index of the correct holding among the five candidates in case_hold.
CASEHOLD_LABELS = ["0", "1", "2", "3", "4"]
268
-
269
-
270
class LexGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for a single LexGLUE task."""

    def __init__(
        self,
        url,
        data_url,
        data_file,
        citation,
        **kwargs,
    ):
        """BuilderConfig for LexGLUE.

        Args:
            url: `string`, url with information about the original data set
            data_url: `string`, url to download the task archive from
            data_file: `string`, filename of the data file inside the archive
            citation: `string`, citation for the data set (the benchmark-level
                citation is appended separately in `LexGLUE._info`)
            **kwargs: keyword arguments forwarded to super
                (e.g. `name`, `description`).
        """
        # Pin every config to version 1.0.0; the rest comes in via kwargs.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.url = url
        self.data_url = data_url
        self.data_file = data_file
        self.citation = citation
306
-
307
class LexGLUE(datasets.GeneratorBasedBuilder):
    """LexGLUE: A Benchmark Dataset for Legal Language Understanding in English. Version 1.0"""

    # One config per task plus an aggregate "all" config (empty fields: the
    # "all" config reuses the per-task files in _split_generators).
    # NOTE: the original ecthr_a, ecthr_b, eurlex and ledgar citations ended
    # with a duplicated closing brace ("}" followed by "}\"\"\""), which is
    # invalid BibTeX; the stray brace has been removed here.
    BUILDER_CONFIGS = [
        LexGlueConfig(
            name="all",
            description="",
            data_url="",
            data_file="",
            url="",
            citation="",
        ),
        LexGlueConfig(
            name="ecthr_a",
            description=textwrap.dedent(
                """\
                The European Court of Human Rights (ECtHR) hears allegations that a state has
                breached human rights provisions of the European Convention of Human Rights (ECHR).
                For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
                Each case is mapped to articles of the ECHR that were violated (if any)."""
            ),
            data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
            data_file="ecthr.jsonl",
            url="https://archive.org/details/ECtHR-NAACL2021",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-paragraph,
                    title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
                    author = "Chalkidis, Ilias and
                      Fergadiotis, Manos and
                      Tsarapatsanis, Dimitrios and
                      Aletras, Nikolaos and
                      Androutsopoulos, Ion and
                      Malakasiotis, Prodromos",
                    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                    month = jun,
                    year = "2021",
                    address = "Online",
                    publisher = "Association for Computational Linguistics",
                    url = "https://aclanthology.org/2021.naacl-main.22",
                    doi = "10.18653/v1/2021.naacl-main.22",
                    pages = "226--241",
                }"""
            ),
        ),
        LexGlueConfig(
            name="ecthr_b",
            description=textwrap.dedent(
                """\
                The European Court of Human Rights (ECtHR) hears allegations that a state has
                breached human rights provisions of the European Convention of Human Rights (ECHR).
                For each case, the dataset provides a list of factual paragraphs (facts) from the case description.
                Each case is mapped to articles of ECHR that were allegedly violated (considered by the court)."""
            ),
            url="https://archive.org/details/ECtHR-NAACL2021",
            data_url="https://zenodo.org/record/5532997/files/ecthr.tar.gz",
            data_file="ecthr.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-paragraph,
                    title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
                    author = "Chalkidis, Ilias
                      and Fergadiotis, Manos
                      and Tsarapatsanis, Dimitrios
                      and Aletras, Nikolaos
                      and Androutsopoulos, Ion
                      and Malakasiotis, Prodromos",
                    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                    year = "2021",
                    address = "Online",
                    url = "https://aclanthology.org/2021.naacl-main.22",
                }"""
            ),
        ),
        LexGlueConfig(
            name="eurlex",
            description=textwrap.dedent(
                """\
                European Union (EU) legislation is published in EUR-Lex portal.
                All EU laws are annotated by EU's Publications Office with multiple concepts from the EuroVoc thesaurus,
                a multilingual thesaurus maintained by the Publications Office.
                The current version of EuroVoc contains more than 7k concepts referring to various activities
                of the EU and its Member States (e.g., economics, health-care, trade).
                Given a document, the task is to predict its EuroVoc labels (concepts)."""
            ),
            url="https://zenodo.org/record/5363165#.YVJOAi8RqaA",
            data_url="https://zenodo.org/record/5532997/files/eurlex.tar.gz",
            data_file="eurlex.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{chalkidis-etal-2021-multieurlex,
                    author = {Chalkidis, Ilias and
                      Fergadiotis, Manos and
                      Androutsopoulos, Ion},
                    title = {MultiEURLEX -- A multi-lingual and multi-label legal document
                      classification dataset for zero-shot cross-lingual transfer},
                    booktitle = {Proceedings of the 2021 Conference on Empirical Methods
                      in Natural Language Processing},
                    year = {2021},
                    location = {Punta Cana, Dominican Republic},
                }"""
            ),
        ),
        LexGlueConfig(
            name="scotus",
            description=textwrap.dedent(
                """\
                The US Supreme Court (SCOTUS) is the highest federal court in the United States of America
                and generally hears only the most controversial or otherwise complex cases which have not
                been sufficiently well solved by lower courts. This is a single-label multi-class classification
                task, where given a document (court opinion), the task is to predict the relevant issue areas.
                The 14 issue areas cluster 278 issues whose focus is on the subject matter of the controversy (dispute)."""
            ),
            url="http://scdb.wustl.edu/data.php",
            data_url="https://zenodo.org/record/5532997/files/scotus.tar.gz",
            data_file="scotus.jsonl",
            citation=textwrap.dedent(
                """\
                @misc{spaeth2020,
                    author = {Harold J. Spaeth and Lee Epstein and Andrew D. Martin, Jeffrey A. Segal
                      and Theodore J. Ruger and Sara C. Benesh},
                    year = {2020},
                    title ={{Supreme Court Database, Version 2020 Release 01}},
                    url= {http://Supremecourtdatabase.org},
                    howpublished={Washington University Law}
                }"""
            ),
        ),
        LexGlueConfig(
            name="ledgar",
            description=textwrap.dedent(
                """\
                LEDGAR dataset aims contract provision (paragraph) classification.
                The contract provisions come from contracts obtained from the US Securities and Exchange Commission (SEC)
                filings, which are publicly available from EDGAR. Each label represents the single main topic
                (theme) of the corresponding contract provision."""
            ),
            url="https://metatext.io/datasets/ledgar",
            data_url="https://zenodo.org/record/5532997/files/ledgar.tar.gz",
            data_file="ledgar.jsonl",
            citation=textwrap.dedent(
                """\
                @inproceedings{tuggener-etal-2020-ledgar,
                    title = "{LEDGAR}: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts",
                    author = {Tuggener, Don and
                      von D{\\"a}niken, Pius and
                      Peetz, Thomas and
                      Cieliebak, Mark},
                    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
                    year = "2020",
                    address = "Marseille, France",
                    url = "https://aclanthology.org/2020.lrec-1.155",
                }"""
            ),
        ),
        LexGlueConfig(
            name="unfair_tos",
            description=textwrap.dedent(
                """\
                The UNFAIR-ToS dataset contains 50 Terms of Service (ToS) from on-line platforms (e.g., YouTube,
                Ebay, Facebook, etc.). The dataset has been annotated on the sentence-level with 8 types of
                unfair contractual terms (sentences), meaning terms that potentially violate user rights
                according to the European consumer law."""
            ),
            url="http://claudette.eui.eu",
            data_url="https://zenodo.org/record/5532997/files/unfair_tos.tar.gz",
            data_file="unfair_tos.jsonl",
            citation=textwrap.dedent(
                """\
                @article{lippi-etal-2019-claudette,
                    title = "{CLAUDETTE}: an automated detector of potentially unfair clauses in online terms of service",
                    author = {Lippi, Marco
                      and Pałka, Przemysław
                      and Contissa, Giuseppe
                      and Lagioia, Francesca
                      and Micklitz, Hans-Wolfgang
                      and Sartor, Giovanni
                      and Torroni, Paolo},
                    journal = "Artificial Intelligence and Law",
                    year = "2019",
                    publisher = "Springer",
                    url = "https://doi.org/10.1007/s10506-019-09243-2",
                    pages = "117--139",
                }"""
            ),
        ),
        LexGlueConfig(
            name="case_hold",
            description=textwrap.dedent(
                """\
                The CaseHOLD (Case Holdings on Legal Decisions) dataset contains approx. 53k multiple choice
                questions about holdings of US court cases from the Harvard Law Library case law corpus.
                Holdings are short summaries of legal rulings accompany referenced decisions relevant for the present case.
                The input consists of an excerpt (or prompt) from a court decision, containing a reference
                to a particular case, while the holding statement is masked out. The model must identify
                the correct (masked) holding statement from a selection of five choices."""
            ),
            url="https://github.com/reglab/casehold",
            data_url="https://zenodo.org/record/5532997/files/casehold.tar.gz",
            data_file="casehold.csv",
            citation=textwrap.dedent(
                """\
                @inproceedings{Zheng2021,
                    author = {Lucia Zheng and
                      Neel Guha and
                      Brandon R. Anderson and
                      Peter Henderson and
                      Daniel E. Ho},
                    title = {When Does Pretraining Help? Assessing Self-Supervised Learning for
                      Law and the CaseHOLD Dataset},
                    year = {2021},
                    booktitle = {International Conference on Artificial Intelligence and Law},
                }"""
            ),
        ),
    ]
528
-
529
def _info(self):
    """Build the DatasetInfo for the currently selected config."""
    # Every task shares the same generic prompt-style schema: a text input,
    # a sequence of candidate reference strings, and the gold reference(s).
    feature_schema = datasets.Features(
        {
            "input": datasets.Value("string"),
            "references": datasets.features.Sequence(datasets.Value("string")),
            "gold": datasets.features.Sequence(datasets.Value("string")),
        }
    )
    # Task-specific citation first, benchmark citation appended after it.
    combined_citation = "\n".join([self.config.citation, MAIN_CITATION])
    return datasets.DatasetInfo(
        description=self.config.description,
        features=feature_schema,
        homepage=self.config.url,
        citation=combined_citation,
    )
541
-
542
def _split_generators(self, dl_manager):
    """Download the per-split jsonl files and declare train/validation/test.

    The "all" config aggregates every individual task; any other config
    downloads only its own three split files. The original code repeated
    the seven-task list verbatim three times; it is hoisted into a single
    local here.
    """
    if self.config.name == "all":
        tasks = ["ecthr_a", "ecthr_b", "scotus", "eurlex", "ledgar", "unfair_tos", "case_hold"]
    else:
        tasks = [self.config.name]

    # NOTE(review): relative paths ("<task>/<split>.jsonl") are handed to
    # dl_manager.download as in the original — presumably resolved against
    # the dataset repository root; confirm against the hub loader.
    splits = {
        split: [dl_manager.download(os.path.join(name, f"{split}.jsonl")) for name in tasks]
        for split in ("train", "validation", "test")
    }

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"files": splits["train"]},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={"files": splits["validation"]},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"files": splits["test"]},
        ),
    ]
566
-
567
def _generate_examples(self, files):
    """Yield (key, example) pairs from one or more jsonl files.

    Each line of every file is one JSON object (the raw text form).
    Fixes two defects in the original:
      * `open(file, "r")` used the platform default encoding — legal corpora
        contain non-ASCII text, so UTF-8 is now explicit;
      * the per-file `enumerate` restarted keys at 0 for every file, which
        produces duplicate example keys when several files are read (the
        "all" config) — keys are now globally unique across files.
    """
    idx = 0
    for path in files:
        with open(path, "r", encoding="utf-8") as fh:
            for line in fh:
                yield idx, json.loads(line)
                idx += 1