albertvillanova (HF Staff) committed
Commit f67aca9 · verified · 1 Parent(s): f54216c

Delete loading script

Files changed (1):
  1. hate_speech_pl.py +0 -111
hate_speech_pl.py DELETED
@@ -1,111 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """HateSpeech Corpus for Polish"""
-
-
- import csv
-
- import datasets
-
-
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = r"""\
- @article{troszynski2017czy,
- title={Czy komputer rozpozna hejtera? Wykorzystanie uczenia maszynowego (ML) w jako{\'s}ciowej analizie danych},
- author={Troszy{\'n}ski, Marek and Wawer, Aleksandra},
- journal={Przegl{\k{a}}d Socjologii Jako{\'s}ciowej},
- volume={13},
- number={2},
- pages={62--80},
- year={2017},
- publisher={Uniwersytet {\L}{\'o}dzki, Wydzia{\l} Ekonomiczno-Socjologiczny, Katedra Socjologii~…}
- }
- """
-
- _DESCRIPTION = """\
- HateSpeech corpus in the current version contains over 2000 posts crawled from public Polish web. They represent various types and degrees of offensive language, expressed toward minorities (eg. ethnical, racial). The data were annotated manually.
- """
-
- _HOMEPAGE = "http://zil.ipipan.waw.pl/HateSpeech"
-
- _LICENSE = "CC BY-NC-SA"
-
- _URLs = [
-     "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011_ZK.csv",
-     "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011b.csv",
-     "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2012_luty.csv",
- ]
-
-
- class HateSpeechPl(datasets.GeneratorBasedBuilder):
-     """HateSpeech Corpus for Polish"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("uint16"),
-                     "text_id": datasets.Value("uint32"),
-                     "annotator_id": datasets.Value("uint8"),
-                     "minority_id": datasets.Value("uint8"),
-                     "negative_emotions": datasets.Value("bool"),
-                     "call_to_action": datasets.Value("bool"),
-                     "source_of_knowledge": datasets.Value("uint8"),
-                     "irony_sarcasm": datasets.Value("bool"),
-                     "topic": datasets.Value("uint8"),
-                     "text": datasets.Value("string"),
-                     "rating": datasets.Value("uint8"),
-                 }
-             ),
-             supervised_keys=None,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         my_urls = _URLs
-         filepaths = dl_manager.download(my_urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepaths": filepaths,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepaths):
-         """Yields examples."""
-         for file_id_, filepath in enumerate(filepaths):
-             with open(filepath, encoding="utf-8") as f:
-                 csv_reader = csv.DictReader(f, delimiter=",", escapechar="\\")
-                 for id_, data in enumerate(csv_reader):
-                     yield f"{file_id_}/{id_}", {
-                         "id": data["id_fragmentu"],
-                         "text_id": data["id_tekstu"],
-                         "annotator_id": data["id_anotatora"],
-                         "minority_id": data["id_mniejszosci"],
-                         "negative_emotions": data["negatywne_emocje"],
-                         "call_to_action": data["wezw_ddzial"],
-                         "source_of_knowledge": data["typ_ramki"],
-                         "irony_sarcasm": data["ironia_sarkazm"],
-                         "topic": data["temat"],
-                         "text": data["tekst"],
-                         "rating": data["ocena"],
-                     }
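
With the loading script deleted, the dataset is read directly from the data files hosted on the Hub (for example an auto-converted Parquet export) rather than by executing repository code. A minimal usage sketch of loading it after this change, assuming the repository id "hate_speech_pl" and that those data files remain available:

# Not part of this commit: a hedged usage sketch.
# Assumes the Hub repository id "hate_speech_pl" and that the data files
# (e.g. an auto-converted Parquet export) are still hosted in the repository.
from datasets import load_dataset

ds = load_dataset("hate_speech_pl", split="train")

# Column names follow the schema defined in the deleted script.
print(ds[0]["text"], ds[0]["rating"])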