Update README.md
README.md CHANGED
@@ -73,34 +73,44 @@ snips = Dataset.from_datasets("AutoIntent/snips")
This dataset is taken from `benayas/snips` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):

```python
"""Convert snips dataset to autointent internal format and scheme."""  # noqa: INP001

from datasets import Dataset as HFDataset
from datasets import load_dataset

from autointent import Dataset
from autointent.schemas import Intent, Sample


def _extract_intents_data(split: HFDataset) -> tuple[dict[str, int], list[Intent]]:
    """Build the intent name -> id mapping and the list of Intent records."""
    intent_names = sorted(split.unique("category"))
    name_to_id = dict(zip(intent_names, range(len(intent_names)), strict=False))

    return name_to_id, [Intent(id=i, name=name) for i, name in enumerate(intent_names)]


def convert_snips(split: HFDataset, name_to_id: dict[str, int]) -> list[Sample]:
    """Convert one split into the desired format."""
    n_classes = len(name_to_id)

    # Bucket samples by intent id so the resulting list is grouped by class.
    classwise_samples = [[] for _ in range(n_classes)]

    for batch in split.iter(batch_size=16, drop_last_batch=False):
        for txt, name in zip(batch["text"], batch["category"], strict=False):
            intent_id = name_to_id[name]
            target_list = classwise_samples[intent_id]
            target_list.append({"utterance": txt, "label": intent_id})

    return [Sample(**sample) for samples_from_one_class in classwise_samples for sample in samples_from_one_class]


if __name__ == "__main__":
    snips = load_dataset("benayas/snips")

    name_to_id, intents_data = _extract_intents_data(snips["train"])

    train_samples = convert_snips(snips["train"], name_to_id)
    test_samples = convert_snips(snips["test"], name_to_id)

    dataset = Dataset.from_dict({"train": train_samples, "test": test_samples, "intents": intents_data})
```
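Once the converted dataset is published on the Hub, loading it back mirrors the call visible in the hunk context above. A minimal sketch, assuming the formatted data lives under the `AutoIntent/snips` repo id shown there:

```python
from autointent import Dataset

# Pull the already-formatted dataset from the Hub (repo id as in the hunk context above).
snips = Dataset.from_datasets("AutoIntent/snips")
```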