SaylorTwift (HF Staff) committed
Commit 5320524 · verified · 1 Parent(s): 1e2148e

Delete loading script

Files changed (1)
  1. EntityMatching.py +0 -101
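
With the loading script removed, load_dataset no longer executes code from this repository and instead reads the hosted data files directly (for Hub datasets, typically via the auto-converted Parquet files). A minimal sketch of loading one configuration after this change; the repo id "SaylorTwift/EntityMatching" is an assumption inferred from the file name, not something this page confirms:

    from datasets import load_dataset

    # Hypothetical repo id -- substitute the actual dataset repository.
    ds = load_dataset("SaylorTwift/EntityMatching", "Beer")
    print(ds["train"][0])  # expected keys: "productA", "productB", "same"
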
EntityMatching.py DELETED
@@ -1,101 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Entity matching datasets from the DeepMatcher benchmark, as preprocessed in HELM."""
-
-
- import json
- import os
- import textwrap
-
- import datasets
-
-
- # BibTeX citation
- _CITATION = """
- @inproceedings{mudgal2018deep,
-     title={Deep learning for entity matching: A design space exploration},
-     author={Mudgal, Sidharth and Li, Han and Rekatsinas, Theodoros and Doan, AnHai and Park, Youngchoon and Krishnan, Ganesh and Deep, Rohit and Arcaute, Esteban and Raghavendra, Vijay},
-     booktitle={Proceedings of the 2018 International Conference on Management of Data},
-     pages={19--34},
-     year={2018}
- }
- """
-
- # Official description of the dataset
- _DESCRIPTION = textwrap.dedent(
-     """
-     """
- )
-
- # Link to an official homepage for the dataset here
- _HOMEPAGE = "https://github.com/anhaidgroup/deepmatcher/blob/master/Datasets.md"
-
- _LICENSE = ""
-
-
- names = ["Beer", "iTunes_Amazon", "Fodors_Zagats", "DBLP_ACM", "DBLP_GoogleScholar", "Amazon_Google", "Walmart_Amazon", "Abt_Buy", "Company", "Dirty_iTunes_Amazon", "Dirty_DBLP_ACM", "Dirty_DBLP_GoogleScholar", "Dirty_Walmart_Amazon"]
-
- class EntityMatching(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"), description=_DESCRIPTION) for name in names]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "productA": datasets.Value("string"),
-                 "productB": datasets.Value("string"),
-                 "same": datasets.Value("bool_"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description="EntityMatching dataset, as preprocessed and shuffled in HELM",
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # Download the three JSONL splits for the selected configuration.
-         test = dl_manager.download(os.path.join(self.config.name, "test.jsonl"))
-         train = dl_manager.download(os.path.join(self.config.name, "train.jsonl"))
-         val = dl_manager.download(os.path.join(self.config.name, "valid.jsonl"))
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"file": train},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"file": val},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"file": test},
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, file):
-         with open(file, encoding="utf-8") as f:
-             for ix, line in enumerate(f):
-                 yield ix, json.loads(line)
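
For reference, the deleted script emitted one example per JSONL line, keyed by its line index, with the productA/productB/same schema declared in _info. A minimal stand-in that reads a split the same way, assuming a local copy of one configuration's files (the path Beer/train.jsonl is illustrative, not confirmed by this page):

    import json

    # Illustrative path; the script resolved "<config_name>/<split>.jsonl" inside the dataset repo.
    with open("Beer/train.jsonl", encoding="utf-8") as f:
        for ix, line in enumerate(f):
            example = json.loads(line)
            # example has keys: "productA", "productB", "same"
            print(ix, example["same"])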