Upload TestDataset.py with huggingface_hub
Browse files- TestDataset.py +24 -5
TestDataset.py
CHANGED
@@ -3,10 +3,11 @@ import pandas as pd
|
|
3 |
import datasets
|
4 |
from glob import glob
|
5 |
import zipfile
|
6 |
-
|
|
|
7 |
class TestDataset(datasets.GeneratorBasedBuilder):
|
8 |
def _info(self):
|
9 |
-
return datasets.DatasetInfo(features=datasets.Features({'
|
10 |
|
11 |
def extract_all(self, dir):
|
12 |
zip_files = glob(dir+'/**/**.zip', recursive=True)
|
@@ -28,14 +29,32 @@ class TestDataset(datasets.GeneratorBasedBuilder):
|
|
28 |
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': {'inputs':downloaded_files} })]
|
29 |
|
30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
def _generate_examples(self, filepaths):
|
32 |
_id = 0
|
33 |
for i,filepath in enumerate(filepaths['inputs']):
|
34 |
-
df =
|
35 |
if len(df.columns) != 3:
|
36 |
continue
|
37 |
-
df.columns = ['
|
38 |
for _, record in df.iterrows():
|
39 |
-
yield str(_id), {'
|
40 |
_id += 1
|
41 |
|
|
|
3 |
import datasets
|
4 |
from glob import glob
|
5 |
import zipfile
|
6 |
+
import re
|
7 |
+
from bs4 import BeautifulSoup
|
8 |
class TestDataset(datasets.GeneratorBasedBuilder):
|
9 |
def _info(self):
    """Describe the dataset: three string-valued features, Name / Age / Gender."""
    columns = ['Name', 'Age', 'Gender']
    features = datasets.Features({column: datasets.Value('string') for column in columns})
    return datasets.DatasetInfo(features=features)
11 |
|
12 |
def extract_all(self, dir):
|
13 |
zip_files = glob(dir+'/**/**.zip', recursive=True)
|
|
|
29 |
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': {'inputs':downloaded_files} })]
|
30 |
|
31 |
|
32 |
+
|
33 |
+
def get_data(self, bs, column):
    """Collect values for *column* from a parsed document.

    First gathers the values of any ``column`` attribute that is non-empty
    (``re.compile(".")`` requires at least one character); if no element
    carries such an attribute, falls back to the text content of elements
    whose tag name is ``column``.

    Args:
        bs: a parsed BeautifulSoup document (or tag) supporting ``find_all``.
        column: attribute name / tag name to extract.

    Returns:
        list of extracted string values (possibly empty).
    """
    elements = [attr[column] for attr in bs.find_all(attrs={column : re.compile(".")})]
    # Idiomatic emptiness check; fall back to tag-name matching only when
    # no attribute-based matches were found.
    if not elements:
        elements = [el.text for el in bs.find_all(column)]
    return elements
|
38 |
+
|
39 |
+
def read_xml(self, path, columns):
    """Parse the XML file at *path* into a DataFrame with one column per
    name in *columns*, using ``get_data`` to pull each column's values.

    NOTE(review): pandas requires all columns to have equal length; files
    where the per-column counts differ will raise — confirm inputs.
    """
    with open(path, 'rb') as handle:
        raw = handle.read()

    soup = BeautifulSoup(raw, "xml")
    extracted = {column: self.get_data(soup, column) for column in columns}
    return pd.DataFrame(extracted)
|
49 |
+
|
50 |
def _generate_examples(self, filepaths):
    """Yield ``(key, example)`` pairs for every record in the downloaded XML files.

    Each file is parsed with ``read_xml``; files that do not yield all of
    the expected columns are skipped. Keys are sequential integer ids
    rendered as strings.

    Args:
        filepaths: mapping with an ``'inputs'`` entry listing XML file paths.

    Yields:
        (str, dict): running id and a ``{'Name', 'Age', 'Gender'}`` record.
    """
    source_columns = ['name', 'age', 'gender']
    target_columns = ['Name', 'Age', 'Gender']
    _id = 0
    for filepath in filepaths['inputs']:
        df = self.read_xml(filepath, source_columns)
        # Skip files where not every requested column was found.
        if len(df.columns) != len(source_columns):
            continue
        df.columns = target_columns
        for _, record in df.iterrows():
            yield str(_id), {column: record[column] for column in target_columns}
            _id += 1
|
60 |
|