Datasets: JaNLI
Tasks: Text Classification
Sub-tasks: natural-language-inference
Languages: Japanese
Size: 10K - 100K
License: CC BY-SA 4.0
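For a quick look at the raw data, the TSV published in the JaNLI repository (the same file the loading script below downloads) can be read directly with pandas. This is only an inspection sketch: the URL and the read_table arguments mirror the script below, while the exact column set is an assumption based on the features it declares.

import pandas as pd

# Raw JaNLI TSV, same URL as _DOWNLOAD_URL in the loading script below.
url = "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv"
df = pd.read_table(url, header=0, sep="\t", index_col=0)

print(df.shape)                                   # number of pairs (10K - 100K range)
print(df["split"].value_counts())                 # train / test sizes
print(df["entailment_label_Ja"].value_counts())   # entailment vs. non-entailment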
import datasets as ds
import pandas as pd

_CITATION = """\
@InProceedings{yanaka-EtAl:2021:blackbox,
  author    = {Yanaka, Hitomi and Mineshima, Koji},
  title     = {Assessing the Generalization Capacity of Pre-trained Language Models through Japanese Adversarial Natural Language Inference},
  booktitle = {Proceedings of the 2021 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP (BlackboxNLP2021)},
  year      = {2021},
}
"""

_DESCRIPTION = """\
"""

_HOMEPAGE = "https://github.com/verypluming/JaNLI"

_LICENSE = "CC BY-SA 4.0"

_DOWNLOAD_URL = "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv"


class JaNLIDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")

    def _info(self) -> ds.DatasetInfo:
        # Schema of one JaNLI example: a Japanese sentence pair with a binary
        # entailment label plus heuristic and semantic-tag annotations.
        features = ds.Features(
            {
                "id": ds.Value("int64"),
                "sentence_A_Ja": ds.Value("string"),
                "sentence_B_Ja": ds.Value("string"),
                "entailment_label_Ja": ds.ClassLabel(names=["entailment", "non-entailment"]),
                "heuristics": ds.Value("string"),
                "number_of_NPs": ds.Value("int32"),
                "semtag": ds.Value("string"),
            }
        )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        # Download the single TSV file and partition it into train/test
        # according to its "split" column.
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        df: pd.DataFrame = pd.read_table(data_path, header=0, sep="\t", index_col=0)
        df["id"] = df.index

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"df": df[df["split"] == "train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,
                gen_kwargs={"df": df[df["split"] == "test"]},
            ),
        ]

    def _generate_examples(self, df: pd.DataFrame):
        # Drop the bookkeeping "split" column and yield one record per row.
        df = df.drop("split", axis=1)
        for i, row in enumerate(df.to_dict("records")):
            yield i, row
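A minimal usage sketch, assuming the loading script above is saved locally as janli.py (the file name is an assumption, and script-based loading requires a datasets version that still supports loading scripts):

from datasets import load_dataset

# Build both splits defined in _split_generators().
dataset = load_dataset("janli.py")   # hypothetical local path to the script above

print(dataset)                        # DatasetDict with "train" and "test" splits
example = dataset["train"][0]
print(example["sentence_A_Ja"], example["sentence_B_Ja"])

# ClassLabel stores the label as an integer; map it back to its name.
label_feature = dataset["train"].features["entailment_label_Ja"]
print(label_feature.int2str(example["entailment_label_Ja"]))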