hojjat-m committed
Commit 3cd7e86 · 1 Parent(s): 9d315c2

Create DigiMag.py

Files changed (1)
  1. DigiMag.py +81 -0
DigiMag.py ADDED
@@ -0,0 +1,81 @@
+ import json
+ import csv
+ import datasets
+ import requests
+ import os
+
+ _CITATION = """\
+ @article{ParsBERT,
+     title={ParsBERT: Transformer-based Model for Persian Language Understanding},
+     author={Mehrdad Farahani and Mohammad Gharachorloo and Marzieh Farahani and Mohammad Manthouri},
+     journal={ArXiv},
+     year={2020},
+     volume={abs/2005.12515}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A total of 8,515 articles scraped from Digikala Online Magazine. This dataset includes seven different classes.
+ """
+
+ _DRIVE_URL = "https://drive.google.com/uc?export=download&id=1YgrCYY-Z0h2z0-PfWVfOGt1Tv0JDI-qz"
+
+
+ class DigiMagConfig(datasets.BuilderConfig):
+     """BuilderConfig for DigiMag."""
+
+     def __init__(self, **kwargs):
+         super(DigiMagConfig, self).__init__(**kwargs)
+
+
+ class DigiMag(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         DigiMagConfig(
+             name="DigiMag",
+             version=datasets.Version("1.0.0"),
+             description="Persian text-classification dataset of DigiMag articles",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "content": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                     "label_id": datasets.Value("int64"),
+                 }
+             ),
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://hooshvare.github.io/docs/datasets/tc#digikala-magazine-digimag",
+             citation=_CITATION,
+         )
+
+     @staticmethod
+     def custom_dataset(src_url, dest_path):
+         """Download the dataset archive from Google Drive and write it to dest_path."""
+         response = requests.get(src_url)
+         response.raise_for_status()
+         with open(dest_path, "wb") as f:
+             f.write(response.content)
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager; the Google Drive archive
+         # is fetched with the custom downloader above and then extracted.
+         downloaded_file = dl_manager.download_custom(_DRIVE_URL, self.custom_dataset)
+         extracted_file = dl_manager.extract(downloaded_file)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/train.csv")}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/test.csv")}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(extracted_file, "digimag/dev.csv")}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields (key, example) tuples from a tab-separated split file."""
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t")
+             for idx, row in enumerate(reader):
+                 yield idx, {
+                     "content": row["content"],
+                     "label": row["label"],
+                     "label_id": int(row["label_id"]),
+                 }
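
For context, a minimal usage sketch (not part of the commit): it assumes the script above is saved locally as DigiMag.py next to the calling code, that an installed datasets version still supports download_custom, and that the Google Drive archive is reachable; the printed fields are only illustrative.

import datasets

# Load all three splits defined in _split_generators (train / test / validation)
# directly from the local loading script.
digimag = datasets.load_dataset("DigiMag.py")

example = digimag["train"][0]
print(example["label"], example["label_id"])
print(example["content"][:200])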