sha | text | id | tags | created_at | metadata | last_modified |
---|---|---|---|---|---|---|
4d662ed99a3e8ac8d1d2e5a5599ad825aef7da44
|
# Dataset Card for "hellenistic-greek-lemmas"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ryderwishart/hellenistic-greek-lemmas
|
[
"region:us"
] |
2023-03-21T15:45:29+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 146758866, "num_examples": 1086734}, {"name": "test", "num_bytes": 14082903, "num_examples": 135842}, {"name": "eval", "num_bytes": 18754547, "num_examples": 135842}], "download_size": 6083954, "dataset_size": 179596316}}
|
2023-03-21T16:45:22+00:00
|
3ef5968373535ff75324969314660af3d444385f
|
# Dataset Card for "portuguese_wikineural"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
arubenruben/portuguese_wikineural
|
[
"region:us"
] |
2023-03-21T16:07:03+00:00
|
{"dataset_info": {"features": [{"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-PER", "2": "I-PER", "3": "B-ORG", "4": "I-ORG", "5": "B-LOC", "6": "I-LOC", "7": "B-MISC", "8": "I-MISC"}}}}], "splits": [{"name": "train", "num_bytes": 33140600, "num_examples": 80560}, {"name": "test", "num_bytes": 4400517, "num_examples": 10160}, {"name": "validation", "num_bytes": 4384834, "num_examples": 10070}], "download_size": 10275737, "dataset_size": 41925951}}
|
2023-04-10T12:45:47+00:00
|
89d9ace53b05723ed371e55e91bc1ec07afcc7ad
|
# Dataset Card for "lam_gender"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
lamnt2008/lam_gender
|
[
"region:us"
] |
2023-03-21T16:12:59+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "female", "1": "male"}}}}], "splits": [{"name": "train", "num_bytes": 886700538.492, "num_examples": 188402}, {"name": "validation", "num_bytes": 34511251.337, "num_examples": 10617}], "download_size": 1046144749, "dataset_size": 921211789.829}}
|
2023-03-21T16:28:31+00:00
|
bd6b2838302f0ec211c6e1a40c67627736e0bee4
|
# Dataset Card for "news-programmatic-labeling"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dvilasuero/news-programmatic-labeling
|
[
"region:us"
] |
2023-03-21T16:14:53+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Business", "1": "Sci/Tech", "2": "Sports", "3": "World"}}}}], "splits": [{"name": "train", "num_bytes": 167543.4299287411, "num_examples": 673}, {"name": "test", "num_bytes": 42072.57007125891, "num_examples": 169}], "download_size": 146616, "dataset_size": 209616.0}}
|
2023-03-21T16:14:55+00:00
|
2fa911a56b9c5f19cb84a2934136e8745c7a0f25
|
```
@article{salvatore2019logical,
title={A logical-based corpus for cross-lingual evaluation},
author={Salvatore, Felipe and Finger, Marcelo and Hirata Jr, Roberto},
journal={arXiv preprint arXiv:1905.05704},
year={2019}
}
```
|
metaeval/clcd-english
|
[
"language:en",
"license:apache-2.0",
"region:us"
] |
2023-03-21T16:32:25+00:00
|
{"language": ["en"], "license": "apache-2.0"}
|
2023-06-22T13:07:47+00:00
|
625d605e9450371a66e23c98a3033a98fffcceb8
|
# Dataset Card for "new-testament-syntax"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ryderwishart/new-testament-syntax
|
[
"region:us"
] |
2023-03-21T16:35:19+00:00
|
{"dataset_info": {"features": [{"name": "morphgnt_id", "dtype": "string"}, {"name": "morphgnt_head_id", "dtype": "string"}, {"name": "function", "dtype": "string"}, {"name": "ref", "dtype": "string"}, {"name": "Cat", "dtype": "string"}, {"name": "Start", "dtype": "string"}, {"name": "End", "dtype": "string"}, {"name": "StrongNumber", "dtype": "string"}, {"name": "UnicodeLemma", "dtype": "string"}, {"name": "Gender", "dtype": "string"}, {"name": "Number", "dtype": "string"}, {"name": "FunctionalTag", "dtype": "string"}, {"name": "Type", "dtype": "string"}, {"name": "morphId", "dtype": "string"}, {"name": "NormalizedForm", "dtype": "string"}, {"name": "Case", "dtype": "string"}, {"name": "Unicode", "dtype": "string"}, {"name": "FormalTag", "dtype": "string"}, {"name": "nodeId", "dtype": "string"}, {"name": "Gloss", "dtype": "string"}, {"name": "LexDomain", "dtype": "string"}, {"name": "LN", "dtype": "string"}, {"name": "Mood", "dtype": "string"}, {"name": "Tense", "dtype": "string"}, {"name": "Voice", "dtype": "string"}, {"name": "SubjRef", "dtype": "string"}, {"name": "Frame", "dtype": "string"}, {"name": "Ref", "dtype": "string"}, {"name": "Person", "dtype": "string"}, {"name": "Degree", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 38838442, "num_examples": 137779}], "download_size": 10423268, "dataset_size": 38838442}}
|
2023-03-22T21:07:33+00:00
|
6bdcaf5faab615ed9d0299830143a2b777b2f295
|
# Dataset Card for "Rettsavgjoerelser"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
MasterThesisCBS/Rettsavgjoerelser
|
[
"region:us"
] |
2023-03-21T16:36:53+00:00
|
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "sak", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "stikkord", "dtype": "string"}, {"name": "Prompt w/suffix", "dtype": "string"}, {"name": "Prompt w/o suffix", "dtype": "string"}, {"name": "prompt length", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 950306420, "num_examples": 8044}, {"name": "test", "num_bytes": 49390818, "num_examples": 424}], "download_size": 494719016, "dataset_size": 999697238}}
|
2023-03-23T11:10:21+00:00
|
04346ad8fef940ff7056879d78d40b9c136416b7
|
Single-turn and multi-turn dialogue corpora extracted from novels and other sources.
|
wybxc/books
|
[
"task_categories:text-generation",
"size_categories:100K<n<1M",
"language:zh",
"license:odc-by",
"region:us"
] |
2023-03-21T16:43:43+00:00
|
{"language": ["zh"], "license": "odc-by", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"]}
|
2023-04-03T11:47:08+00:00
|
b97a9adfe3ed323eca4782a297a21cb44efb1ba9
|
davanstrien/fuego-20230321-171519-61dbff
|
[
"fuego",
"region:us"
] |
2023-03-21T17:15:21+00:00
|
{"tags": ["fuego"], "fuego": {"id": "20230321-171519-61dbff", "status": "preparing", "script": "script.py", "requirements_file": "requirements.txt", "space_id": "davanstrien/fuego-20230321-171519-61dbff", "space_hardware": "cpu-basic"}}
|
2023-03-21T17:15:25+00:00
|
|
dcd676066b67c4aa666906ab3cfbfb46d23e69a1
|
marcingordon/testing-donut
|
[
"region:us"
] |
2023-03-21T17:29:27+00:00
|
{}
|
2023-03-21T17:29:49+00:00
|
|
546cba9a268d3c45a392baf1245ba39201f020db
|
# Dataset Card for "disc_cla_primera"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_primera
|
[
"region:us"
] |
2023-03-21T17:51:14+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 60679317, "num_examples": 11713}], "download_size": 32426863, "dataset_size": 60679317}}
|
2023-03-21T17:51:19+00:00
|
200e880a43c209b33599c64a6ca30dd15171d6cf
|
# Dataset Card for "disc_cla_segunda"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_segunda
|
[
"region:us"
] |
2023-03-21T17:52:22+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 30333232, "num_examples": 7327}], "download_size": 15864012, "dataset_size": 30333232}}
|
2023-03-21T17:52:27+00:00
|
fb3d69e52d48a6b9de217e9a29fd3253b559bed7
|
# Dataset Card for "disc_cla_tercera"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_tercera
|
[
"region:us"
] |
2023-03-21T17:53:12+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 15774540, "num_examples": 4913}], "download_size": 8277875, "dataset_size": 15774540}}
|
2023-03-21T17:53:16+00:00
|
28c5eed0d7387e5b28c165303311bb3d6b20a0f0
|
# Dataset Card for "disc_cla_cuarta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_cuarta
|
[
"region:us"
] |
2023-03-21T17:54:41+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 14861447, "num_examples": 3349}], "download_size": 7807410, "dataset_size": 14861447}}
|
2023-03-21T17:54:46+00:00
|
c1152f5e2abbc360fb05e7efd7af7dd9124c5f27
|
# Dataset Card for "disc_cla_quinta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_quinta
|
[
"region:us"
] |
2023-03-21T17:56:25+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 38464874, "num_examples": 7507}], "download_size": 20620936, "dataset_size": 38464874}}
|
2023-03-21T17:56:29+00:00
|
2fd620e010ad25390b3296c23cb121b4e4bbefa7
|
# Dataset Card for "disc_cla_sexta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_sexta
|
[
"region:us"
] |
2023-03-21T17:58:10+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 27186116, "num_examples": 7591}], "download_size": 14208855, "dataset_size": 27186116}}
|
2023-03-21T17:58:14+00:00
|
abad78880a277d77cb825ce7686c4a7a427a8ff0
|
# Dataset Card for "disc_cla_septima"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_septima
|
[
"region:us"
] |
2023-03-21T18:00:04+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 39746420, "num_examples": 9432}], "download_size": 20745223, "dataset_size": 39746420}}
|
2023-03-21T18:00:08+00:00
|
cf822853cceca59be7f32cec944b56ed630973dc
|
# Dataset Card for "nllb_lug_en_vigorous_clean"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mekaneeky/nllb_lug_en_vigorous_clean
|
[
"region:us"
] |
2023-03-21T18:02:39+00:00
|
{"dataset_info": {"features": [{"name": "translation", "dtype": {"translation": {"languages": ["eng_Latn", "lug_Latn"]}}}, {"name": "laser_score", "dtype": "float32"}, {"name": "source_sentence_lid", "dtype": "float32"}, {"name": "target_sentence_lid", "dtype": "float32"}, {"name": "source_sentence_source", "dtype": "string"}, {"name": "source_sentence_url", "dtype": "string"}, {"name": "target_sentence_source", "dtype": "string"}, {"name": "target_sentence_url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 44321608.60547476, "num_examples": 94114}], "download_size": 21686104, "dataset_size": 44321608.60547476}}
|
2023-03-21T18:03:01+00:00
|
f37643e3c8bafb15cd9ceca03a3ee90a98088d83
|
# Dataset Card for "split-imdb"
|
Deysi/split-imdb
|
[
"task_categories:text-classification",
"size_categories:10K<n<100K",
"language:en",
"sentiment analysis",
"region:us"
] |
2023-03-21T18:25:05+00:00
|
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "pretty_name": "Split dataset for imdb film reviews", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 46538455.6, "num_examples": 35000}, {"name": "test", "num_bytes": 9972526.2, "num_examples": 7500}, {"name": "valid", "num_bytes": 9972526.2, "num_examples": 7500}], "download_size": 0, "dataset_size": 66483508}, "tags": ["sentiment analysis"]}
|
2023-03-21T22:55:45+00:00
|
03a78b0ca3a248d3c1ac7bcad9913da84811989e
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/Clone-detection-BigCloneBench in Semeru
# CodeXGLUE -- Clone Detection (BCB)
## Task Definition
Given two code snippets as input, the task is binary classification (0/1), where 1 stands for semantic equivalence and 0 otherwise. Models are evaluated by F1 score.
## Updates
2021-9-13: We have updated the evaluator script. Since this is a binary classification task, we use the binary F1 score instead of the "macro" F1 score.
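A minimal sketch (not the official evaluator) of the difference between the binary and macro F1 scores using scikit-learn; the label arrays below are made up for illustration:
```python
from sklearn.metrics import f1_score

# Hypothetical gold labels and predictions (1 = clone, 0 = not a clone).
y_true = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]

# Binary F1 scores only the positive (clone) class, as used since 2021-9-13.
print("binary F1:", f1_score(y_true, y_pred, average="binary"))
# Macro F1 averages the per-class F1 of both classes (the old evaluator's metric).
print("macro F1:", f1_score(y_true, y_pred, average="macro"))
```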
## Dataset
The dataset we use is [BigCloneBench](https://www.cs.usask.ca/faculty/croy/papers/2014/SvajlenkoICSME2014BigERA.pdf) and filtered following the paper [Detecting Code Clones with Graph Neural Network and Flow-Augmented Abstract Syntax Tree](https://arxiv.org/pdf/2002.08653.pdf).
### Data Format
1. dataset/data.jsonl is stored in jsonlines format. Each line in the uncompressed file represents one function. One row is illustrated below.
- **func:** the function
- **idx:** index of the example
2. train.txt/valid.txt/test.txt provide examples, stored in the following format: idx1 idx2 label
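A minimal sketch, assuming the file layout and field names described above (not the official loader), of joining the function file with a pair file:
```python
import json

# Load every function from data.jsonl, keyed by its index.
functions = {}
with open("dataset/data.jsonl") as f:
    for line in f:
        record = json.loads(line)
        functions[record["idx"]] = record["func"]

# Each line of train.txt is "idx1 idx2 label"; resolve both indices to source code.
pairs = []
with open("train.txt") as f:
    for line in f:
        idx1, idx2, label = line.split()
        pairs.append((functions[idx1], functions[idx2], int(label)))

print(f"loaded {len(pairs)} labeled function pairs")
```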
### Data Statistics
Data statistics of the dataset are shown in the table below:
| | #Examples |
| ----- | :-------: |
| Train | 901,028 |
| Dev | 415,416 |
| Test | 415,416 |
## Reference
<pre><code>@inproceedings{svajlenko2014towards,
title={Towards a big data curated benchmark of inter-project code clones},
author={Svajlenko, Jeffrey and Islam, Judith F and Keivanloo, Iman and Roy, Chanchal K and Mia, Mohammad Mamun},
booktitle={2014 IEEE International Conference on Software Maintenance and Evolution},
pages={476--480},
year={2014},
organization={IEEE}
}
@inproceedings{wang2020detecting,
title={Detecting Code Clones with Graph Neural Network and Flow-Augmented Abstract Syntax Tree},
author={Wang, Wenhan and Li, Ge and Ma, Bo and Xia, Xin and Jin, Zhi},
booktitle={2020 IEEE 27th International Conference on Software Analysis, Evolution and Reengineering (SANER)},
pages={261--271},
year={2020},
organization={IEEE}
}</code></pre>
|
semeru/Code-Code-CloneDetection-BigCloneBench
|
[
"license:mit",
"arxiv:2002.08653",
"region:us"
] |
2023-03-21T18:37:21+00:00
|
{"license": "mit", "Programminglanguage": "Java", "version": "N/A", "Date": "2014 Big clone bench paper https://www.cs.usask.ca/faculty/croy/papers/2014/SvajlenkoICSME2014BigERA.pdf", "Contaminated": "Very Likely", "Size": "Standard Tokenizer"}
|
2023-03-27T17:31:02+00:00
|
218877649f258051ca35532e4ae8a52db7ff0143
|
# Dataset Card for DeTexD: A Benchmark Dataset for Delicate Text Detection
## Dataset Description
- **Repository:** [DeTexD repository](https://github.com/grammarly/detexd)
- **Paper:** [DeTexD: A Benchmark Dataset for Delicate Text Detection](TODO)
### Dataset Summary
We define *delicate text* as any text that is emotionally charged or potentially triggering such that engaging with it has the potential to result in harm. This broad term covers a range of sensitive texts that vary across four major dimensions: 1) riskiness, 2) explicitness, 3) topic, and 4) target.
This dataset contains texts with fine-grained individual annotator labels from 0 to 5 (where 0 indicates no risk and 5 indicates high risk) and averaged binary labels. See paper for more details.
## Dataset Structure
### Data Instances
```
{'text': '"He asked me and the club if we could give him a couple of days off just to clear up his mind and he will be back in the group, I suppose, next Monday, back for training and then be a regular part of the whole squad again," Rangnick said.',
'annotator_1': 0,
'annotator_2': 0,
'annotator_3': 0,
'label': 0}
```
### Data Fields
- `text`: Text to be classified
- `annotator_1`: Annotator 1 score (0-5)
- `annotator_2`: Annotator 2 score (0-5)
- `annotator_3`: Annotator 3 score (0-5)
- `label`: Binary label derived from the averaged annotator score (positive if the average is >= 3), either "negative" (0) or "positive" (1)
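A minimal sketch of how the binary `label` relates to the three annotator scores, assuming the averaging-and-thresholding rule described above:
```python
def binary_label(annotator_1: int, annotator_2: int, annotator_3: int) -> int:
    """Return 1 (positive) if the averaged 0-5 risk score is >= 3, else 0 (negative)."""
    average = (annotator_1 + annotator_2 + annotator_3) / 3
    return int(average >= 3)

# The example instance above: all annotators scored 0, so the label is negative.
print(binary_label(0, 0, 0))  # 0
```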
### Data Splits
| | test |
|--------------------|-----:|
| Number of examples | 1023 |
### Citation Information
```
@inproceedings{chernodub-etal-2023-detexd,
title = "{D}e{T}ex{D}: A Benchmark Dataset for Delicate Text Detection",
author = "Yavnyi, Serhii and Sliusarenko, Oleksii and Razzaghi, Jade and Mo, Yichen and Hovakimyan, Knar and Chernodub, Artem",
booktitle = "The 7th Workshop on Online Abuse and Harms (WOAH)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.woah-1.2",
pages = "14--28",
abstract = "Over the past few years, much research has been conducted to identify and regulate toxic language. However, few studies have addressed a broader range of sensitive texts that are not necessarily overtly toxic. In this paper, we introduce and define a new category of sensitive text called {``}delicate text.{''} We provide the taxonomy of delicate text and present a detailed annotation scheme. We annotate DeTexD, the first benchmark dataset for delicate text detection. The significance of the difference in the definitions is highlighted by the relative performance deltas between models trained each definitions and corpora and evaluated on the other. We make publicly available the DeTexD Benchmark dataset, annotation guidelines, and baseline model for delicate text detection.",
}
```
|
grammarly/detexd-benchmark
|
[
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:en",
"license:apache-2.0",
"region:us"
] |
2023-03-21T18:44:32+00:00
|
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "DeTexD: A Benchmark Dataset for Delicate Text Detection", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "annotator_1", "dtype": "int32"}, {"name": "annotator_2", "dtype": "int32"}, {"name": "annotator_3", "dtype": "int32"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "positive"}}}}], "splits": [{"name": "test", "num_examples": 1023}]}}
|
2023-07-10T16:36:37+00:00
|
0178d208f04726d0430236e7beba43d1c10c720a
|
# Dataset Card for "fungi_futures"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
pimentooliver/fungifutures
|
[
"region:us"
] |
2023-03-21T18:59:12+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3671136212.896, "num_examples": 4657}], "download_size": 3127940814, "dataset_size": 3671136212.896}}
|
2023-03-21T20:20:36+00:00
|
6dfe3ddea439c4ba66029d9b2284871e5917822f
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/Clone-detection-POJ-104 in Semeru
# CodeXGLUE -- Clone Detection (POJ-104)
## Task Definition
Given a code snippet and a collection of candidates as input, the task is to return the top-K codes with the same semantics. Models are evaluated by the MAP@R score. MAP@R is defined as the mean of average precision scores, each of which is evaluated for retrieving the R most similar samples given a query. For a code (query), R is the number of other codes in the same class, i.e. R=499 in this dataset.
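A minimal sketch of the MAP@R computation described above, assuming a ranked list of retrieved class labels per query (not the official evaluator):
```python
def average_precision_at_r(retrieved_labels, query_label, r):
    """Average precision over the top-r retrieved items for a single query."""
    hits, precision_sum = 0, 0.0
    for rank, label in enumerate(retrieved_labels[:r], start=1):
        if label == query_label:
            hits += 1
            precision_sum += hits / rank
    return precision_sum / r if r else 0.0

# For POJ-104 each problem class has 500 solutions, so R = 499 per query;
# MAP@R is the mean of average_precision_at_r over all queries.
```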
## Dataset
We use [POJ-104](https://arxiv.org/pdf/1409.5718.pdf) dataset on this task.
### Data Format
For each file, each line in the uncompressed file represents one function. One row is illustrated below.
- **code:** the source code
- **label:** the number of problem that the source code solves
- **index:** the index of example
### Data Statistics
Data statistics of the dataset are shown in the table below:
| | #Problems | #Examples |
| ----- | --------- | :-------: |
| Train | 64 | 32,000 |
| Dev | 16 | 8,000 |
| Test | 24 | 12,000 |
## Reference
<pre><code>@inproceedings{mou2016convolutional,
title={Convolutional neural networks over tree structures for programming language processing},
author={Mou, Lili and Li, Ge and Zhang, Lu and Wang, Tao and Jin, Zhi},
booktitle={Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence},
pages={1287--1293},
year={2016}
}</code></pre>
|
semeru/Code-Code-CloneDetection-POJ104
|
[
"license:mit",
"arxiv:1409.5718",
"region:us"
] |
2023-03-21T19:44:24+00:00
|
{"license": "mit", "Programminglanguage": "C", "version": "N/A", "Date": "2015 POJ dataset from paper: https://arxiv.org/pdf/1409.5718.pdf", "Contaminated": "Very Likely", "Size": "Standard Tokenizer"}
|
2023-03-27T17:29:02+00:00
|
d6861110e812a0a941db36943ee65371c1f46540
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/code-refinement/data/medium in Semeru
## Task Definition
Code refinement aims to automatically fix bugs in the code, which can contribute to reducing the cost of bug-fixes for developers.
In CodeXGLUE, given a piece of Java code with bugs, the task is to remove the bugs to output the refined code.
Models are evaluated by BLEU scores, accuracy (exact match) and [CodeBLEU](https://github.com/microsoft/CodeXGLUE/blob/main/code-to-code-trans/CodeBLEU.MD).
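A minimal sketch of the exact-match and BLEU part of the evaluation (CodeBLEU needs the linked tooling); `sacrebleu` is used here as an assumption, not as the official script, and the code strings are made up:
```python
import sacrebleu

# Hypothetical refined-code predictions and reference fixes.
predictions = ["return a + b ;", "if ( x == null ) return ;"]
references = ["return a + b ;", "if ( x != null ) return ;"]

exact_match = sum(p == r for p, r in zip(predictions, references)) / len(references)
bleu = sacrebleu.corpus_bleu(predictions, [references])

print(f"exact match: {exact_match:.2%}")
print(f"BLEU: {bleu.score:.2f}")
```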
## Dataset
We use the dataset released by this paper (https://arxiv.org/pdf/1812.08693.pdf). The source side is a Java function with bugs and the target side is the refined one.
All function and variable names are normalized. The dataset contains two subsets (i.e., small and medium) based on function length. This dataset is the medium subset.
### Data Statistics
Data statistics of this dataset are shown in the table below:
| | #Examples |
| ------- | :-------: |
| | Medium |
| Train | 52,364 |
| Valid | 6,545 |
| Test | 6,545 |
# Reference
<pre><code>@article{tufano2019empirical,
title={An empirical study on learning bug-fixing patches in the wild via neural machine translation},
author={Tufano, Michele and Watson, Cody and Bavota, Gabriele and Penta, Massimiliano Di and White, Martin and Poshyvanyk, Denys},
journal={ACM Transactions on Software Engineering and Methodology (TOSEM)},
volume={28},
number={4},
pages={1--29},
year={2019},
publisher={ACM New York, NY, USA}
}</code></pre>
|
semeru/code-code-CodeRefinement-Java-Medium
|
[
"license:mit",
"arxiv:1812.08693",
"region:us"
] |
2023-03-21T19:57:57+00:00
|
{"license": "mit", "Programminglanguage": "Java", "version": "N/A", "Date": "May 2019 paper release date for https://arxiv.org/pdf/1812.08693.pdf", "Contaminated": "Very Likely", "Size": "Standard Tokenizer "}
|
2023-03-27T17:24:46+00:00
|
b7678eb87ffd22ddc934ad07b483893c30a6a7a1
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/code-refinement/data/small in Semeru
## Task Definition
Code refinement aims to automatically fix bugs in the code, which can contribute to reducing the cost of bug-fixes for developers.
In CodeXGLUE, given a piece of Java code with bugs, the task is to remove the bugs to output the refined code.
Models are evaluated by BLEU scores, accuracy (exact match) and [CodeBLEU](https://github.com/microsoft/CodeXGLUE/blob/main/code-to-code-trans/CodeBLEU.MD).
## Dataset
We use the dataset released by this paper (https://arxiv.org/pdf/1812.08693.pdf). The source side is a Java function with bugs and the target side is the refined one.
All function and variable names are normalized. The dataset contains two subsets (i.e., small and medium) based on function length. This dataset is the small subset.
### Data Statistics
Data statistics of this dataset are shown in the table below:
| | #Examples |
| ------- | :-------: |
| | Small |
| Train | 46,680 |
| Valid | 5,835 |
| Test | 5,835 |
# Reference
<pre><code>@article{tufano2019empirical,
title={An empirical study on learning bug-fixing patches in the wild via neural machine translation},
author={Tufano, Michele and Watson, Cody and Bavota, Gabriele and Penta, Massimiliano Di and White, Martin and Poshyvanyk, Denys},
journal={ACM Transactions on Software Engineering and Methodology (TOSEM)},
volume={28},
number={4},
pages={1--29},
year={2019},
publisher={ACM New York, NY, USA}
}</code></pre>
|
semeru/code-code-CodeRefinement-Java-Small
|
[
"license:mit",
"arxiv:1812.08693",
"region:us"
] |
2023-03-21T20:03:09+00:00
|
{"license": "mit", "Programminglanguage": "Java", "version": "N/A", "Date": "May 2019 paper release date for https://arxiv.org/pdf/1812.08693.pdf", "Contaminated": "Very Likely", "Size": "Standard Tokenizer"}
|
2023-03-27T17:23:44+00:00
|
2eed561239d04b5c74a0c350b11849c8163f6f74
|
# Dataset Card for "processed_bert_dataset-test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Exqrch/processed_bert_dataset-test
|
[
"region:us"
] |
2023-03-21T20:13:45+00:00
|
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 10029.0, "num_examples": 48}], "download_size": 6515, "dataset_size": 10029.0}}
|
2023-03-22T13:30:15+00:00
|
d41bf942e2f2e7e151cef4fe0de22fdec763b921
|
Dataset generated from cyrillic train set using Stackmix
========================================================
Number of images: 300000
Sources:
* [Cyrillic dataset](https://www.kaggle.com/datasets/constantinwerner/cyrillic-handwriting-dataset)
* [Stackmix code](https://github.com/ai-forever/StackMix-OCR)
|
nastyboget/stackmix_cyrillic
|
[
"task_categories:image-to-text",
"size_categories:100K<n<1M",
"language:ru",
"license:mit",
"region:us"
] |
2023-03-21T20:18:13+00:00
|
{"language": ["ru"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["image-to-text"]}
|
2023-03-23T18:44:41+00:00
|
671500c2221f0033931d21a8264b9663813f4910
|
# Dataset Card for "speeches-congre-clean-names"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/speeches-congre-clean-names
|
[
"region:us"
] |
2023-03-21T20:35:46+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "comision", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 181327260, "num_examples": 94501}], "download_size": 92131968, "dataset_size": 181327260}}
|
2023-03-21T20:38:34+00:00
|
2308814b089b8e663c4a68d7862a6e210ad9fe88
|
Dataset generated from HKR train set using ScrabbleGAN
======================================================
Number of images: 300000
Sources:
* [HKR dataset](https://github.com/abdoelsayed2016/HKR_Dataset)
* [ScrabbleGAN code](https://github.com/ai-forever/ScrabbleGAN)
|
nastyboget/gan_hkr
|
[
"task_categories:image-to-text",
"size_categories:100K<n<1M",
"language:ru",
"license:mit",
"region:us"
] |
2023-03-21T21:10:05+00:00
|
{"language": ["ru"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["image-to-text"]}
|
2023-03-23T18:43:53+00:00
|
460b1f3312aa21ef774e916e532a9576f7938a0d
|
# RuTurboAlpaca
Dataset of ChatGPT-generated instructions in Russian.
<img src="https://cdn.midjourney.com/770a35fa-00c0-4214-bb88-727dbc7cfaf3/0_0.png" >
* Code: [rulm/self_instruct](https://github.com/IlyaGusev/rulm/tree/master/self_instruct)
* Code is based on [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) and [self-instruct](https://github.com/yizhongw/self-instruct/).
* 29822 examples
Preliminary evaluation by an expert based on 400 samples:
* 83% of samples contain correct instructions
* 63% of samples have correct instructions and outputs
Crowdsourcing-based evaluation on 3500 samples:
* 90% of samples contain correct instructions
* 68% of samples have correct instructions and outputs
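A minimal sketch of loading the dataset with 🤗 `datasets` and inspecting the core fields (`instruction`, `input`, `output`, per the dataset info); this snippet is illustrative and not part of the original card:
```python
from datasets import load_dataset

dataset = load_dataset("IlyaGusev/ru_turbo_alpaca", split="train")

example = dataset[0]
# Each record pairs a Russian instruction (and optional input) with a generated output.
print(example["instruction"])
print(example["input"])
print(example["output"])
```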
Prompt template:
```
Составь набор из {{num_tasks}} разных заданий для дообучения языковой модели:
1. Делай задания максимально непохожими друг на друга: по типу, по запрашиваемым действиям, по формулировке, по наличию входа.
2. Задания должны быть выполнимы языковой моделью, которая не умеет работать с картинками, видео, и аудио, и не имеет доступа ко внешнему миру.
3. Используй хороший грамотный русский язык.
4. Делай задания в одно или два предложения.
5. Генерируй подходящие реалистичные входные данные, не используй общие шаблоны типа \"Имя человека\" или [имя] вместо реального имени.
6. Задание может быть без входных данных, в таком случае используй токен <noinput> вместо них.
7. На выходе сгенерируй подходящий длинный ответ.
8. Следуй тому же шаблону, который приведен в примерах, разделяй задания с помощью ###. Это важно!
Примеры заданий:
{% for task in example_tasks %}
{{task.index}}. Задание: {{task.instruction}}
{{task.index}}. Вход: {{task.input}}
{{task.index}}. Выход: {{task.output}}
{{ "###" if not loop.last else "" }}
{% endfor %}
```
## Legal disclaimer
Data is based on OpenAI's gpt-3.5-turbo, whose [terms of use](https://openai.com/policies/terms-of-use) prohibit us from developing models that compete with OpenAI. Not for you.
|
IlyaGusev/ru_turbo_alpaca
|
[
"task_categories:text-generation",
"task_categories:text2text-generation",
"size_categories:10K<n<100K",
"language:ru",
"license:cc-by-4.0",
"instruction-finetuning",
"instruction generation",
"alpaca",
"region:us"
] |
2023-03-21T21:17:42+00:00
|
{"language": ["ru"], "license": "cc-by-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation", "text2text-generation"], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "alternative_output", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "all_labels", "sequence": "string"}, {"name": "agreement", "dtype": "float32"}, {"name": "overlap", "dtype": "uint32"}], "splits": [{"name": "train", "num_bytes": 54774775, "num_examples": 29822}], "download_size": 14565995, "dataset_size": 54774775}, "tags": ["instruction-finetuning", "instruction generation", "alpaca"]}
|
2023-05-25T18:45:14+00:00
|
d4d7dcdd080358c03e77c026c1acada1c9bb4998
|
# Dataset Card for "mayo_clinic_symptoms_and_diseases_v1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
celikmus/mayo_clinic_symptoms_and_diseases_v1
|
[
"language:en",
"region:us"
] |
2023-03-21T21:31:15+00:00
|
{"language": "en", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1321926, "num_examples": 1058}], "download_size": 626009, "dataset_size": 1321926}}
|
2023-07-16T18:37:52+00:00
|
5a78d0c3079b4d05de4c9151d4df188b691053f8
|
Negative TI for [Mitsua](https://huggingface.co/Mitsua/mitsua-diffusion-one)
## Test1
TI
- [badmitsua-test1-e10.pt](https://huggingface.co/datasets/p1atdev/badmitsua/blob/main/embeddings/badmitsua-test1-e10.pt)
Dataset
Uses 150 images generated with mitsua-diffusion-one-base
- [test1.zip](https://huggingface.co/datasets/p1atdev/badmitsua/blob/main/test1.zip)
|
p1atdev/badmitsua
|
[
"license:cc0-1.0",
"region:us"
] |
2023-03-21T21:51:49+00:00
|
{"license": "cc0-1.0"}
|
2023-03-21T22:24:10+00:00
|
91c0ea92482d3f91ba48d7554305d9c86339e6a3
|
# Dataset Card for "sentences-and-emotions"
Recognizing Emotion Cause in Conversations. Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Deepanway Ghosal, Rishabh Bhardwaj, Samson Yu Bai Jian, Pengfei Hong, Romila Ghosh, Abhinaba Roy, Niyati Chhaya, Alexander Gelbukh, Rada Mihalcea. Cognitive Computation (2021).
|
Deysi/sentences-and-emotions
|
[
"task_categories:text-classification",
"size_categories:100K<n<1M",
"language:en",
"region:us"
] |
2023-03-21T22:23:47+00:00
|
{"language": ["en"], "size_categories": ["100K<n<1M"], "task_categories": ["text-classification"], "pretty_name": "Sentences and emotions", "dataset_info": {"features": [{"name": "utterance", "dtype": "string"}, {"name": "emotion", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 62487, "num_examples": 816}, {"name": "valid", "num_bytes": 39971, "num_examples": 493}, {"name": "train", "num_bytes": 188423, "num_examples": 2405}], "download_size": 36170, "dataset_size": 290881}}
|
2023-03-21T22:54:16+00:00
|
c8647293d538fe3aeda3539a3f4274d1b5ea6762
|
# Dataset Card for "GunDataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
pzalavad/HelmetDataset
|
[
"region:us"
] |
2023-03-21T22:56:57+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Helmet", "1": "Helmet No helmet", "2": "No helmet"}}}}], "splits": [{"name": "train", "num_bytes": 29913013.721, "num_examples": 2193}, {"name": "test", "num_bytes": 1107623.0, "num_examples": 107}, {"name": "validation", "num_bytes": 2204328.0, "num_examples": 209}], "download_size": 34614447, "dataset_size": 33224964.721}}
|
2023-03-25T00:49:25+00:00
|
39180bb8d156c572b2b6f3be0c7bb6fc5641c1c4
|
Stonehome/DromariaOneNote
|
[
"region:us"
] |
2023-03-22T00:12:29+00:00
|
{}
|
2023-03-22T00:27:29+00:00
|
|
8244f185697cf454a580255f2fbf24d5c13870be
|
# Codex Math QA
Solve math_qa using codex-davinci-002 via Python programming.
[Since OpenAI decided to shut off code-davinci-002 behind Azure](https://mobile.twitter.com/Veqtor/status/1638081493099597824), this dataset shares the generation results of code-davinci-002, OpenAI's 176B code generation model.
### Data Splits
| name |train|validation|test|
|-------|----:|---------:|---:|
|main|25065| 4133|2985|
|rational| - | 4151|2985|
1. Basic prompts: only the question
```
dataset = load_dataset("theblackcat102/codex-math-qa", "main")
```
**Example :**
```
the average weight of 8 person ' s increases by 1.5 kg when a new person comes in place of one of them weighing 75 kg. what might be the weight of the new person ?
Write a short snippet of python function which solve this problem. No need to explain the answer, let the code output the answer.
```
output
```python
def average_weight(weights):
total_weight = sum(weights)
return total_weight / len(weights)
def new_person_weight(weights):
average_weight = average_weight(weights)
new_person_weight = average_weight + 1.5
return new_person_weight
weights = [75, 80, 85, 90, 95]
print(new_person_weight(weights))
```
2. Prompts with rationale: include the `Rationale` text, which should help with planning the program
```
dataset = load_dataset("theblackcat102/codex-math-qa", "rational")
```
**Example:**
```
one copy machine can make 20 copies a minute, and a second copy machine makes 10 copies a minute. if the two copiers work together, how long would it take them to make 900 copies ?
Rationale: "total work done by both machines in a minute = 20 + 10 = 30 copies total number of copies required = 900 time = 900 / 30 = 30 mins answer d"
Write a short snippet of python function which solve this problem within 30 lines of code. Add comments in the function block for explaination.
```
output
```python
def copy_machine(copies, copy_speed):
"""
:param copies: number of copies required
:param copy_speed: speed of the copy machine
:return: time in minutes
"""
total_work = sum(copy_speed)
time = copies / total_work
return time
if __name__ == "__main__":
print(copy_machine(900, [20, 10]))
```
### Notes:
The generated results are unvalidated and are kept exactly as produced by codex-davinci-002, so many of the answers are incorrect and some of the code has syntax errors. Validating them is left to future work; the aim of this dataset is to provide a source or reference for code-based math answering with codex-davinci-002.
## Dataset Creation
The dataset was sourced from [math_qa](https://huggingface.co/datasets/math_qa), with a prompt appended to each question asking for a Python solution to the answer. The aim is to provide a dataset for the kind of work offloading seen in Galactica.
The generation config for code-davinci-002 is as follows:
| name | value|
|-------|----:|
|max_tokens| 2048 |
|temperature| 0.5 |
|top_p| 0.7 |
### Citation Information
```
@inproceedings{amini-etal-2019-mathqa,
title = "{M}ath{QA}: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms",
author = "Amini, Aida and
Gabriel, Saadia and
Lin, Shanchuan and
Koncel-Kedziorski, Rik and
Choi, Yejin and
Hajishirzi, Hannaneh",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N19-1245",
doi = "10.18653/v1/N19-1245",
pages = "2357--2367",
}
```
|
theblackcat102/codex-math-qa
|
[
"task_categories:text2text-generation",
"task_categories:text-generation",
"language:en",
"license:other",
"codex-generated",
"code",
"mathematic",
"region:us"
] |
2023-03-22T00:56:14+00:00
|
{"language": ["en"], "license": "other", "task_categories": ["text2text-generation", "text-generation"], "tags": ["codex-generated", "code", "mathematic"]}
|
2023-03-26T00:04:18+00:00
|
e8cadd6bf716fba2e5d8d0e8dfb4327683a4261c
|
# Dataset Card for "Food101_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Multimodal-Fatima/Food101_train
|
[
"region:us"
] |
2023-03-22T01:07:54+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "apple pie", "1": "baby back ribs", "2": "baklava", "3": "beef carpaccio", "4": "beef tartare", "5": "beet salad", "6": "beignets", "7": "bibimbap", "8": "bread pudding", "9": "breakfast burrito", "10": "bruschetta", "11": "caesar salad", "12": "cannoli", "13": "caprese salad", "14": "carrot cake", "15": "ceviche", "16": "cheesecake", "17": "cheese plate", "18": "chicken curry", "19": "chicken quesadilla", "20": "chicken wings", "21": "chocolate cake", "22": "chocolate mousse", "23": "churros", "24": "clam chowder", "25": "club sandwich", "26": "crab cakes", "27": "creme brulee", "28": "croque madame", "29": "cup cakes", "30": "deviled eggs", "31": "donuts", "32": "dumplings", "33": "edamame", "34": "eggs benedict", "35": "escargots", "36": "falafel", "37": "filet mignon", "38": "fish and chips", "39": "foie gras", "40": "french fries", "41": "french onion soup", "42": "french toast", "43": "fried calamari", "44": "fried rice", "45": "frozen yogurt", "46": "garlic bread", "47": "gnocchi", "48": "greek salad", "49": "grilled cheese sandwich", "50": "grilled salmon", "51": "guacamole", "52": "gyoza", "53": "hamburger", "54": "hot and sour soup", "55": "hot dog", "56": "huevos rancheros", "57": "hummus", "58": "ice cream", "59": "lasagna", "60": "lobster bisque", "61": "lobster roll sandwich", "62": "macaroni and cheese", "63": "macarons", "64": "miso soup", "65": "mussels", "66": "nachos", "67": "omelette", "68": "onion rings", "69": "oysters", "70": "pad thai", "71": "paella", "72": "pancakes", "73": "panna cotta", "74": "peking duck", "75": "pho", "76": "pizza", "77": "pork chop", "78": "poutine", "79": "prime rib", "80": "pulled pork sandwich", "81": "ramen", "82": "ravioli", "83": "red velvet cake", "84": "risotto", "85": "samosa", "86": "sashimi", "87": "scallops", "88": "seaweed salad", "89": "shrimp and grits", "90": "spaghetti bolognese", "91": "spaghetti carbonara", "92": "spring rolls", "93": "steak", "94": "strawberry shortcake", "95": "sushi", "96": "tacos", "97": "takoyaki", "98": "tiramisu", "99": "tuna tartare", "100": "waffles"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_L_14_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3906035933.0, "num_examples": 75750}], "download_size": 3802666577, "dataset_size": 3906035933.0}}
|
2023-05-04T05:17:20+00:00
|
408419795277f050a1f9f07780bebb46ac162f85
|
# Dataset Card for "Food101_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Multimodal-Fatima/Food101_test
|
[
"region:us"
] |
2023-03-22T01:14:18+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "apple pie", "1": "baby back ribs", "2": "baklava", "3": "beef carpaccio", "4": "beef tartare", "5": "beet salad", "6": "beignets", "7": "bibimbap", "8": "bread pudding", "9": "breakfast burrito", "10": "bruschetta", "11": "caesar salad", "12": "cannoli", "13": "caprese salad", "14": "carrot cake", "15": "ceviche", "16": "cheesecake", "17": "cheese plate", "18": "chicken curry", "19": "chicken quesadilla", "20": "chicken wings", "21": "chocolate cake", "22": "chocolate mousse", "23": "churros", "24": "clam chowder", "25": "club sandwich", "26": "crab cakes", "27": "creme brulee", "28": "croque madame", "29": "cup cakes", "30": "deviled eggs", "31": "donuts", "32": "dumplings", "33": "edamame", "34": "eggs benedict", "35": "escargots", "36": "falafel", "37": "filet mignon", "38": "fish and chips", "39": "foie gras", "40": "french fries", "41": "french onion soup", "42": "french toast", "43": "fried calamari", "44": "fried rice", "45": "frozen yogurt", "46": "garlic bread", "47": "gnocchi", "48": "greek salad", "49": "grilled cheese sandwich", "50": "grilled salmon", "51": "guacamole", "52": "gyoza", "53": "hamburger", "54": "hot and sour soup", "55": "hot dog", "56": "huevos rancheros", "57": "hummus", "58": "ice cream", "59": "lasagna", "60": "lobster bisque", "61": "lobster roll sandwich", "62": "macaroni and cheese", "63": "macarons", "64": "miso soup", "65": "mussels", "66": "nachos", "67": "omelette", "68": "onion rings", "69": "oysters", "70": "pad thai", "71": "paella", "72": "pancakes", "73": "panna cotta", "74": "peking duck", "75": "pho", "76": "pizza", "77": "pork chop", "78": "poutine", "79": "prime rib", "80": "pulled pork sandwich", "81": "ramen", "82": "ravioli", "83": "red velvet cake", "84": "risotto", "85": "samosa", "86": "sashimi", "87": "scallops", "88": "seaweed salad", "89": "shrimp and grits", "90": "spaghetti bolognese", "91": "spaghetti carbonara", "92": "spring rolls", "93": "steak", "94": "strawberry shortcake", "95": "sushi", "96": "tacos", "97": "takoyaki", "98": "tiramisu", "99": "tuna tartare", "100": "waffles"}}}}, {"name": "id", "dtype": "int64"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_food101", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_L_14_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 1317820332.5, "num_examples": 25250}], "download_size": 1263803958, "dataset_size": 1317820332.5}}
|
2023-05-04T05:23:00+00:00
|
bd3988995e6db193427255abdebd522afe24ce76
|
# Dataset Card for "CUB_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Multimodal-Fatima/CUB_train
|
[
"region:us"
] |
2023-03-22T02:07:02+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "description", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Black footed Albatross", "1": "Laysan Albatross", "2": "Sooty Albatross", "3": "Groove billed Ani", "4": "Crested Auklet", "5": "Least Auklet", "6": "Parakeet Auklet", "7": "Rhinoceros Auklet", "8": "Brewer Blackbird", "9": "Red winged Blackbird", "10": "Rusty Blackbird", "11": "Yellow headed Blackbird", "12": "Bobolink", "13": "Indigo Bunting", "14": "Lazuli Bunting", "15": "Painted Bunting", "16": "Cardinal", "17": "Spotted Catbird", "18": "Gray Catbird", "19": "Yellow breasted Chat", "20": "Eastern Towhee", "21": "Chuck will Widow", "22": "Brandt Cormorant", "23": "Red faced Cormorant", "24": "Pelagic Cormorant", "25": "Bronzed Cowbird", "26": "Shiny Cowbird", "27": "Brown Creeper", "28": "American Crow", "29": "Fish Crow", "30": "Black billed Cuckoo", "31": "Mangrove Cuckoo", "32": "Yellow billed Cuckoo", "33": "Gray crowned Rosy Finch", "34": "Purple Finch", "35": "Northern Flicker", "36": "Acadian Flycatcher", "37": "Great Crested Flycatcher", "38": "Least Flycatcher", "39": "Olive sided Flycatcher", "40": "Scissor tailed Flycatcher", "41": "Vermilion Flycatcher", "42": "Yellow bellied Flycatcher", "43": "Frigatebird", "44": "Northern Fulmar", "45": "Gadwall", "46": "American Goldfinch", "47": "European Goldfinch", "48": "Boat tailed Grackle", "49": "Eared Grebe", "50": "Horned Grebe", "51": "Pied billed Grebe", "52": "Western Grebe", "53": "Blue Grosbeak", "54": "Evening Grosbeak", "55": "Pine Grosbeak", "56": "Rose breasted Grosbeak", "57": "Pigeon Guillemot", "58": "California Gull", "59": "Glaucous winged Gull", "60": "Heermann Gull", "61": "Herring Gull", "62": "Ivory Gull", "63": "Ring billed Gull", "64": "Slaty backed Gull", "65": "Western Gull", "66": "Anna Hummingbird", "67": "Ruby throated Hummingbird", "68": "Rufous Hummingbird", "69": "Green Violetear", "70": "Long tailed Jaeger", "71": "Pomarine Jaeger", "72": "Blue Jay", "73": "Florida Jay", "74": "Green Jay", "75": "Dark eyed Junco", "76": "Tropical Kingbird", "77": "Gray Kingbird", "78": "Belted Kingfisher", "79": "Green Kingfisher", "80": "Pied Kingfisher", "81": "Ringed Kingfisher", "82": "White breasted Kingfisher", "83": "Red legged Kittiwake", "84": "Horned Lark", "85": "Pacific Loon", "86": "Mallard", "87": "Western Meadowlark", "88": "Hooded Merganser", "89": "Red breasted Merganser", "90": "Mockingbird", "91": "Nighthawk", "92": "Clark Nutcracker", "93": "White breasted Nuthatch", "94": "Baltimore Oriole", "95": "Hooded Oriole", "96": "Orchard Oriole", "97": "Scott Oriole", "98": "Ovenbird", "99": "Brown Pelican", "100": "White Pelican", "101": "Western Wood Pewee", "102": "Sayornis", "103": "American Pipit", "104": "Whip poor Will", "105": "Horned Puffin", "106": "Common Raven", "107": "White necked Raven", "108": "American Redstart", "109": "Geococcyx", "110": "Loggerhead Shrike", "111": "Great Grey Shrike", "112": "Baird Sparrow", "113": "Black throated Sparrow", "114": "Brewer Sparrow", "115": "Chipping Sparrow", "116": "Clay colored Sparrow", "117": "House Sparrow", "118": "Field Sparrow", "119": "Fox Sparrow", "120": "Grasshopper Sparrow", "121": "Harris Sparrow", "122": "Henslow Sparrow", "123": "Le Conte Sparrow", "124": "Lincoln Sparrow", "125": "Nelson Sharp tailed Sparrow", "126": "Savannah Sparrow", "127": "Seaside Sparrow", "128": "Song Sparrow", "129": "Tree Sparrow", "130": "Vesper Sparrow", "131": "White crowned Sparrow", 
"132": "White throated Sparrow", "133": "Cape Glossy Starling", "134": "Bank Swallow", "135": "Barn Swallow", "136": "Cliff Swallow", "137": "Tree Swallow", "138": "Scarlet Tanager", "139": "Summer Tanager", "140": "Artic Tern", "141": "Black Tern", "142": "Caspian Tern", "143": "Common Tern", "144": "Elegant Tern", "145": "Forsters Tern", "146": "Least Tern", "147": "Green tailed Towhee", "148": "Brown Thrasher", "149": "Sage Thrasher", "150": "Black capped Vireo", "151": "Blue headed Vireo", "152": "Philadelphia Vireo", "153": "Red eyed Vireo", "154": "Warbling Vireo", "155": "White eyed Vireo", "156": "Yellow throated Vireo", "157": "Bay breasted Warbler", "158": "Black and white Warbler", "159": "Black throated Blue Warbler", "160": "Blue winged Warbler", "161": "Canada Warbler", "162": "Cape May Warbler", "163": "Cerulean Warbler", "164": "Chestnut sided Warbler", "165": "Golden winged Warbler", "166": "Hooded Warbler", "167": "Kentucky Warbler", "168": "Magnolia Warbler", "169": "Mourning Warbler", "170": "Myrtle Warbler", "171": "Nashville Warbler", "172": "Orange crowned Warbler", "173": "Palm Warbler", "174": "Pine Warbler", "175": "Prairie Warbler", "176": "Prothonotary Warbler", "177": "Swainson Warbler", "178": "Tennessee Warbler", "179": "Wilson Warbler", "180": "Worm eating Warbler", "181": "Yellow Warbler", "182": "Northern Waterthrush", "183": "Louisiana Waterthrush", "184": "Bohemian Waxwing", "185": "Cedar Waxwing", "186": "American Three toed Woodpecker", "187": "Pileated Woodpecker", "188": "Red bellied Woodpecker", "189": "Red cockaded Woodpecker", "190": "Red headed Woodpecker", "191": "Downy Woodpecker", "192": "Bewick Wren", "193": "Cactus Wren", "194": "Carolina Wren", "195": "House Wren", "196": "Marsh Wren", "197": "Rock Wren", "198": "Winter Wren", "199": "Common Yellowthroat"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 583337273.046, "num_examples": 5994}], "download_size": 583734869, "dataset_size": 583337273.046}}
|
2023-03-22T02:08:06+00:00
|
82447a42c5e05c7fe21cd36accfdf399148e8668
|
# Dataset Card for "CUB_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Multimodal-Fatima/CUB_test
|
[
"region:us"
] |
2023-03-22T02:08:06+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "description", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Black footed Albatross", "1": "Laysan Albatross", "2": "Sooty Albatross", "3": "Groove billed Ani", "4": "Crested Auklet", "5": "Least Auklet", "6": "Parakeet Auklet", "7": "Rhinoceros Auklet", "8": "Brewer Blackbird", "9": "Red winged Blackbird", "10": "Rusty Blackbird", "11": "Yellow headed Blackbird", "12": "Bobolink", "13": "Indigo Bunting", "14": "Lazuli Bunting", "15": "Painted Bunting", "16": "Cardinal", "17": "Spotted Catbird", "18": "Gray Catbird", "19": "Yellow breasted Chat", "20": "Eastern Towhee", "21": "Chuck will Widow", "22": "Brandt Cormorant", "23": "Red faced Cormorant", "24": "Pelagic Cormorant", "25": "Bronzed Cowbird", "26": "Shiny Cowbird", "27": "Brown Creeper", "28": "American Crow", "29": "Fish Crow", "30": "Black billed Cuckoo", "31": "Mangrove Cuckoo", "32": "Yellow billed Cuckoo", "33": "Gray crowned Rosy Finch", "34": "Purple Finch", "35": "Northern Flicker", "36": "Acadian Flycatcher", "37": "Great Crested Flycatcher", "38": "Least Flycatcher", "39": "Olive sided Flycatcher", "40": "Scissor tailed Flycatcher", "41": "Vermilion Flycatcher", "42": "Yellow bellied Flycatcher", "43": "Frigatebird", "44": "Northern Fulmar", "45": "Gadwall", "46": "American Goldfinch", "47": "European Goldfinch", "48": "Boat tailed Grackle", "49": "Eared Grebe", "50": "Horned Grebe", "51": "Pied billed Grebe", "52": "Western Grebe", "53": "Blue Grosbeak", "54": "Evening Grosbeak", "55": "Pine Grosbeak", "56": "Rose breasted Grosbeak", "57": "Pigeon Guillemot", "58": "California Gull", "59": "Glaucous winged Gull", "60": "Heermann Gull", "61": "Herring Gull", "62": "Ivory Gull", "63": "Ring billed Gull", "64": "Slaty backed Gull", "65": "Western Gull", "66": "Anna Hummingbird", "67": "Ruby throated Hummingbird", "68": "Rufous Hummingbird", "69": "Green Violetear", "70": "Long tailed Jaeger", "71": "Pomarine Jaeger", "72": "Blue Jay", "73": "Florida Jay", "74": "Green Jay", "75": "Dark eyed Junco", "76": "Tropical Kingbird", "77": "Gray Kingbird", "78": "Belted Kingfisher", "79": "Green Kingfisher", "80": "Pied Kingfisher", "81": "Ringed Kingfisher", "82": "White breasted Kingfisher", "83": "Red legged Kittiwake", "84": "Horned Lark", "85": "Pacific Loon", "86": "Mallard", "87": "Western Meadowlark", "88": "Hooded Merganser", "89": "Red breasted Merganser", "90": "Mockingbird", "91": "Nighthawk", "92": "Clark Nutcracker", "93": "White breasted Nuthatch", "94": "Baltimore Oriole", "95": "Hooded Oriole", "96": "Orchard Oriole", "97": "Scott Oriole", "98": "Ovenbird", "99": "Brown Pelican", "100": "White Pelican", "101": "Western Wood Pewee", "102": "Sayornis", "103": "American Pipit", "104": "Whip poor Will", "105": "Horned Puffin", "106": "Common Raven", "107": "White necked Raven", "108": "American Redstart", "109": "Geococcyx", "110": "Loggerhead Shrike", "111": "Great Grey Shrike", "112": "Baird Sparrow", "113": "Black throated Sparrow", "114": "Brewer Sparrow", "115": "Chipping Sparrow", "116": "Clay colored Sparrow", "117": "House Sparrow", "118": "Field Sparrow", "119": "Fox Sparrow", "120": "Grasshopper Sparrow", "121": "Harris Sparrow", "122": "Henslow Sparrow", "123": "Le Conte Sparrow", "124": "Lincoln Sparrow", "125": "Nelson Sharp tailed Sparrow", "126": "Savannah Sparrow", "127": "Seaside Sparrow", "128": "Song Sparrow", "129": "Tree Sparrow", "130": "Vesper Sparrow", "131": "White crowned Sparrow", 
"132": "White throated Sparrow", "133": "Cape Glossy Starling", "134": "Bank Swallow", "135": "Barn Swallow", "136": "Cliff Swallow", "137": "Tree Swallow", "138": "Scarlet Tanager", "139": "Summer Tanager", "140": "Artic Tern", "141": "Black Tern", "142": "Caspian Tern", "143": "Common Tern", "144": "Elegant Tern", "145": "Forsters Tern", "146": "Least Tern", "147": "Green tailed Towhee", "148": "Brown Thrasher", "149": "Sage Thrasher", "150": "Black capped Vireo", "151": "Blue headed Vireo", "152": "Philadelphia Vireo", "153": "Red eyed Vireo", "154": "Warbling Vireo", "155": "White eyed Vireo", "156": "Yellow throated Vireo", "157": "Bay breasted Warbler", "158": "Black and white Warbler", "159": "Black throated Blue Warbler", "160": "Blue winged Warbler", "161": "Canada Warbler", "162": "Cape May Warbler", "163": "Cerulean Warbler", "164": "Chestnut sided Warbler", "165": "Golden winged Warbler", "166": "Hooded Warbler", "167": "Kentucky Warbler", "168": "Magnolia Warbler", "169": "Mourning Warbler", "170": "Myrtle Warbler", "171": "Nashville Warbler", "172": "Orange crowned Warbler", "173": "Palm Warbler", "174": "Pine Warbler", "175": "Prairie Warbler", "176": "Prothonotary Warbler", "177": "Swainson Warbler", "178": "Tennessee Warbler", "179": "Wilson Warbler", "180": "Worm eating Warbler", "181": "Yellow Warbler", "182": "Northern Waterthrush", "183": "Louisiana Waterthrush", "184": "Bohemian Waxwing", "185": "Cedar Waxwing", "186": "American Three toed Woodpecker", "187": "Pileated Woodpecker", "188": "Red bellied Woodpecker", "189": "Red cockaded Woodpecker", "190": "Red headed Woodpecker", "191": "Downy Woodpecker", "192": "Bewick Wren", "193": "Cactus Wren", "194": "Carolina Wren", "195": "House Wren", "196": "Marsh Wren", "197": "Rock Wren", "198": "Winter Wren", "199": "Common Yellowthroat"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 576586188.934, "num_examples": 5794}], "download_size": 564530335, "dataset_size": 576586188.934}}
|
2023-03-22T02:08:54+00:00
|
d117cdbeba25212c1fa0c6964ebf2fc2abf3736a
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/code-to-code-trans in Semeru
# CodeXGLUE -- Code2Code Translation
## Task Definition
Code translation aims to migrate legacy software from one programming language on a platform to another.
In CodeXGLUE, given a piece of Java (C#) code, the task is to translate the code into its C# (Java) version.
Models are evaluated by BLEU scores, accuracy (exactly match), and [CodeBLEU](https://github.com/microsoft/CodeXGLUE/blob/main/code-to-code-trans/CodeBLEU.MD) scores.
## Dataset
The dataset is collected from several public repos, including Lucene (http://lucene.apache.org/), POI (http://poi.apache.org/), JGit (https://github.com/eclipse/jgit/) and Antlr (https://github.com/antlr/).
We collect both the Java and C# versions of the code and find the parallel functions. After removing duplicates and functions with an empty body, we split the whole dataset into training, validation and test sets.
### Data Format
The dataset is in the "data" folder. Each line of the files is a function, and the suffix of the file indicates the programming language.
### Data Statistics
Data statistics of the dataset are shown in the below table:
| | #Examples |
| ------- | :-------: |
| Train | 10,300 |
| Valid | 500 |
| Test | 1,000 |
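As an illustration (this is not the official CodeXGLUE scorer, which additionally reports CodeBLEU), a minimal sketch of the BLEU and exact-match metrics over predicted translations, assuming `sacrebleu` is installed:
```python
# Minimal evaluation sketch: corpus BLEU plus exact-match accuracy for predicted
# C# (or Java) functions against references. Illustrative only.
import sacrebleu

def evaluate_translations(predictions, references):
    # Exact match: fraction of predictions identical to the reference after stripping whitespace.
    exact_match = sum(p.strip() == r.strip() for p, r in zip(predictions, references)) / len(references)
    # Corpus-level BLEU over the whole test set.
    bleu = sacrebleu.corpus_bleu(predictions, [references]).score
    return {"bleu": bleu, "exact_match": exact_match}

print(evaluate_translations(
    ["public int Add(int a, int b) { return a + b; }"],
    ["public int Add(int a, int b) { return a + b; }"],
))
```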
|
semeru/code-code-translation-java-csharp
|
[
"license:mit",
"region:us"
] |
2023-03-22T02:10:54+00:00
|
{"license": "mit", "Programminglanguage": "Java/C#", "version": "N/A", "Date": "Most likely 2020", "Contaminated": "Very Likely", "Size": "Standard Tokenizer"}
|
2023-03-27T17:22:28+00:00
|
73b2c0e8a9e7ac2fe24eaf4e78a4e67378f39efa
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/CodeCompletion-token/dataset/javaCorpus in Semeru
# CodeXGLUE -- Code Completion (token level)
**Update 2021.07.30:** We update the code completion dataset with literals normalized to avoid sensitive information.
Here is the introduction and pipeline for token level code completion task.
## Task Definition
Predict the next code token given the context of previous tokens. Models are evaluated by token-level accuracy.
Code completion is one of the most widely used features in software development through IDEs. An effective code completion tool could improve software developers' productivity. We provide code completion evaluation tasks in two granularities -- token level and line level. Here we introduce token level code completion. The token level task is analogous to language modeling. Models should be able to predict the next token of arbitrary types.
## Dataset
The dataset is in Java.
### Dependency
- javalang == 0.13.0
### Github Java Corpus
We use java corpus dataset mined by Allamanis and Sutton, in their MSR 2013 paper [Mining Source Code Repositories at Massive Scale using Language Modeling](https://homepages.inf.ed.ac.uk/csutton/publications/msr2013.pdf). We follow the same split and preprocessing in Karampatsis's ICSE 2020 paper [Big Code != Big Vocabulary: Open-Vocabulary Models for Source Code](http://homepages.inf.ed.ac.uk/s1467463/documents/icse20-main-1325.pdf).
### Data Format
The code corpus is saved in txt-format files; each line is one tokenized code snippet:
```
<s> from __future__ import unicode_literals <EOL> from django . db import models , migrations <EOL> class Migration ( migrations . Migration ) : <EOL> dependencies = [ <EOL> ] <EOL> operations = [ <EOL> migrations . CreateModel ( <EOL> name = '<STR_LIT>' , <EOL> fields = [ <EOL> ( '<STR_LIT:id>' , models . AutoField ( verbose_name = '<STR_LIT>' , serialize = False , auto_created = True , primary_key = True ) ) , <EOL> ( '<STR_LIT:name>' , models . CharField ( help_text = b'<STR_LIT>' , max_length = <NUM_LIT> ) ) , <EOL> ( '<STR_LIT:image>' , models . ImageField ( help_text = b'<STR_LIT>' , null = True , upload_to = b'<STR_LIT>' , blank = True ) ) , <EOL> ] , <EOL> options = { <EOL> '<STR_LIT>' : ( '<STR_LIT:name>' , ) , <EOL> '<STR_LIT>' : '<STR_LIT>' , <EOL> } , <EOL> bases = ( models . Model , ) , <EOL> ) , <EOL> ] </s>
```
### Data Statistics
Data statistics of Github Java Corpus dataset are shown in the below table:
| Data Split | #Files | #Tokens |
| ----------- | :--------: | :---------: |
| Train | 12,934 | 15.7M |
| Dev | 7,176 | 3.8M |
| Test | 8,268 | 5.3M |
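For reference, a minimal sketch of the token-level accuracy metric described above (illustrative only; it is not the official CodeXGLUE evaluator):
```python
# Token-level accuracy: fraction of positions where the predicted next token
# matches the ground-truth token.
def token_level_accuracy(predictions, ground_truth):
    assert len(predictions) == len(ground_truth)
    correct = sum(p == g for p, g in zip(predictions, ground_truth))
    return correct / len(ground_truth)

gold = "public static void main ( String [ ] args )".split()
pred = "public static void main ( String [ ] argv )".split()
print(token_level_accuracy(pred, gold))  # 0.9
```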
|
semeru/code-code-CodeCompletion-TokenLevel-Java
|
[
"license:mit",
"region:us"
] |
2023-03-22T03:11:04+00:00
|
{"license": "mit", "Programminglanguage": "Java", "version": "N/A", "Date": "From paper: https://homepages.inf.ed.ac.uk/csutton/publications/msr2013.pdf (2013 - paper release date)", "Contaminated": "Very Likely", "Size": "Standard Tokenizer (TreeSitter)"}
|
2023-03-24T14:22:48+00:00
|
90902c2fe01b7c5672d9e104107be57ce093ff67
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/CodeCompletion-token/dataset/py150 in Semeru
# CodeXGLUE -- Code Completion (token level)
**Update 2021.07.30:** We update the code completion dataset with literals normalized to avoid sensitive information.
Here is the introduction and pipeline for token level code completion task.
## Task Definition
Predict the next code token given the context of previous tokens. Models are evaluated by token-level accuracy.
Code completion is one of the most widely used features in software development through IDEs. An effective code completion tool could improve software developers' productivity. We provide code completion evaluation tasks in two granularities -- token level and line level. Here we introduce token level code completion. The token level task is analogous to language modeling. Models should be able to predict the next token of arbitrary types.
## Dataset
The dataset is in Python.
### Dependency
- python 3.7
### py150
We use the py150 corpus from Raychev et al.'s OOPSLA 2016 paper [Probabilistic Model for Code with Decision Trees](https://files.sri.inf.ethz.ch/website/papers/oopsla16-dt.pdf), following the same split and preprocessing as in Karampatsis's ICSE 2020 paper [Big Code != Big Vocabulary: Open-Vocabulary Models for Source Code](http://homepages.inf.ed.ac.uk/s1467463/documents/icse20-main-1325.pdf).
### Data Format
The code corpus is saved in txt-format files; each line is one tokenized code snippet:
```
<s> from __future__ import unicode_literals <EOL> from django . db import models , migrations <EOL> class Migration ( migrations . Migration ) : <EOL> dependencies = [ <EOL> ] <EOL> operations = [ <EOL> migrations . CreateModel ( <EOL> name = '<STR_LIT>' , <EOL> fields = [ <EOL> ( '<STR_LIT:id>' , models . AutoField ( verbose_name = '<STR_LIT>' , serialize = False , auto_created = True , primary_key = True ) ) , <EOL> ( '<STR_LIT:name>' , models . CharField ( help_text = b'<STR_LIT>' , max_length = <NUM_LIT> ) ) , <EOL> ( '<STR_LIT:image>' , models . ImageField ( help_text = b'<STR_LIT>' , null = True , upload_to = b'<STR_LIT>' , blank = True ) ) , <EOL> ] , <EOL> options = { <EOL> '<STR_LIT>' : ( '<STR_LIT:name>' , ) , <EOL> '<STR_LIT>' : '<STR_LIT>' , <EOL> } , <EOL> bases = ( models . Model , ) , <EOL> ) , <EOL> ] </s>
```
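A minimal reading sketch for this plain-text format; `train.txt` is a placeholder for wherever the downloaded split is stored:
```python
# Each line of the txt file is one tokenized snippet delimited by <s> ... </s>,
# with literals normalized to placeholders such as <STR_LIT> / <NUM_LIT> and
# newlines replaced by <EOL>.
def read_corpus(path):
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield line.strip().split()

tokens = next(read_corpus("train.txt"))
# Count normalized literal placeholders in the first snippet.
print(sum(t.startswith(("<STR_LIT", "<NUM_LIT")) for t in tokens))
```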
### Data Statistics
Data statistics of the py150 dataset are shown in the table below. Note that there is no dev set in the original py150 dataset, so we select 5,000 files from the original train set as the dev set.
| Data Split | #Files | #Tokens |
| ----------- | :---------: | :---------: |
| Train | 95,000 | 72.1M |
| Dev | 5,000 | 4.4M |
| Test | 50,000 | 37.3M |
|
semeru/code-code-CodeCompletion-TokenLevel-Python
|
[
"license:mit",
"region:us"
] |
2023-03-22T03:21:32+00:00
|
{"license": "mit", "Programminglanguage": "python", "version": "python3", "Date": "From paper [Probabilistic for Code with Decision trees](https://files.sri.inf.ethz.ch/website/papers/oopsla16-dt.pdf)(2016- paper release date)", "Contaminated": "Very Likely", "Size": "Standard Tokenizer (TreeSitter)"}
|
2023-03-24T14:10:30+00:00
|
c8cbfeecf4090a18678384c559102a68bfa00829
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/Defect-detection in Semeru
# CodeXGLUE -- Defect Detection
## Task Definition
Given a piece of source code, the task is to identify whether it is insecure code that may expose software systems to attacks such as resource leaks, use-after-free vulnerabilities and DoS attacks. We treat the task as binary classification (0/1), where 1 stands for insecure code and 0 for secure code.
### Dataset
The dataset we use comes from the paper [*Devign*: Effective Vulnerability Identification by Learning Comprehensive Program Semantics via Graph Neural Networks](http://papers.nips.cc/paper/9209-devign-effective-vulnerability-identification-by-learning-comprehensive-program-semantics-via-graph-neural-networks.pdf). We combine all projects and split 80%/10%/10% for training/dev/test.
### Data Format
Three pre-processed .jsonl files, i.e. train.jsonl, valid.jsonl and test.jsonl, are present.
Each line in a file represents one function, with the following fields:
- **func:** the source code
- **target:** 0 or 1 (vulnerable or not)
- **idx:** the index of the example
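A minimal loading sketch, assuming the standard .jsonl layout described above; `train.jsonl` is a placeholder path:
```python
import json

def load_split(path):
    """Read one split: returns (source_code, label) pairs."""
    examples = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            obj = json.loads(line)
            examples.append((obj["func"], int(obj["target"])))
    return examples

train = load_split("train.jsonl")
print(len(train), train[0][1])  # number of functions and the label of the first one
```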
### Data Statistics
Data statistics of the dataset are shown in the below table:
| | #Examples |
| ----- | :-------: |
| Train | 21,854 |
| Dev | 2,732 |
| Test | 2,732 |
## Reference
<pre><code>@inproceedings{zhou2019devign,
title={Devign: Effective vulnerability identification by learning comprehensive program semantics via graph neural networks},
author={Zhou, Yaqin and Liu, Shangqing and Siow, Jingkai and Du, Xiaoning and Liu, Yang},
booktitle={Advances in Neural Information Processing Systems},
pages={10197--10207},
year={2019}
}</code></pre>
|
semeru/code-code-DefectDetection
|
[
"license:mit",
"region:us"
] |
2023-03-22T03:30:09+00:00
|
{"license": "mit", "Programminglanguage": "C", "version": "N/A", "Date": "Devign(Jun 2019 - paper release date)", "Contaminated": "Very Likely", "Size": "Standard Tokenizer"}
|
2023-03-27T20:16:02+00:00
|
a6c52e2ba656380eb2ddabeddc55c8b3c853f624
|
# Dataset Card for "data_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
linhqyy/data_test
|
[
"region:us"
] |
2023-03-22T03:34:07+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9016799768.976877, "num_examples": 64418}, {"name": "test", "num_bytes": 1001925014.6631207, "num_examples": 7158}], "download_size": 10000087591, "dataset_size": 10018724783.639997}}
|
2023-03-22T11:18:40+00:00
|
cb7deb606db022364ebc2639303c0560f49b4714
|
### Dataset is imported from CodeXGLUE and pre-processed using their script.
# Where to find in Semeru:
The dataset can be found at /nfs/semeru/semeru_datasets/code_xglue/code-to-code/Method-Generation/dataset/codexglue_method_generation in Semeru
# CodeXGLUE -- Method Generation
Here is the introduction and pipeline for method generation task.
## Task Definition
Method generation is the prediction of a method body implementation conditioned on a signature, a docstring, and any additional context.
## Dataset
We use the CodeSearchNet Python dataset. The CodeSearchNet repositories are re-downloaded to extract all the methods, including their signatures, docstrings and bodies. We remove methods that don't have docstrings, as well as those whose names contain 'test'. We preserve the context around each method as auxiliary information, since generating a method body based only on its signature/docstring is a difficult task. We also apply literal normalization for a better user experience.
### Data Format
The data format of each line in `train/dev/test.jsonl` is:
```json
{
"signature": "def do_transform(self, v=<NUM_LIT:1>):",
"body": "if not self.transform:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>self.latest_value = utils.Transform ...",
"docstring": "Apply the transformation (if it exists) to the latest_value",
"id": "f19:c4:m1"
}
```
The `id` indicates where you can find this method in the raw data. In this instance, it means the 2nd method in the 2nd class in the 19th file. We apply literal normalization to the function signature and body, replace `\n` with `<EOL>`, and track indentation with `<INDENT>` and `<DEDENT>` markers.
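As an illustration (not part of the official pipeline), a minimal sketch that roughly reverses this normalization for display, turning `<EOL>` back into newlines and tracking indentation from `<INDENT>`/`<DEDENT>`; literal placeholders are left untouched:
```python
import re

MARKER = re.compile(r"(<EOL>|<INDENT>|<DEDENT>)")

def detokenize_body(body: str, indent: str = "    ") -> str:
    lines, depth, current = [], 1, ""  # the body starts one level inside the function
    for piece in MARKER.split(body):
        if piece == "<EOL>":
            lines.append(indent * depth + current.strip())
            current = ""
        elif piece == "<INDENT>":
            depth += 1
        elif piece == "<DEDENT>":
            depth = max(depth - 1, 0)
        else:
            current += piece
    if current.strip():
        lines.append(indent * depth + current.strip())
    return "\n".join(lines)

print(detokenize_body("if not self.transform:<EOL><INDENT>return<EOL><DEDENT>"))
```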
### Data Statistics
Data statistics are shown in the below table.
| Data Split | #Instances |
| ----------- | :---------: |
| Train | 893,538 |
| Dev | 20,000 |
| Test | 20,000 |
## Reference
<pre><code>@article{clement2021long,
title={Long-Range Modeling of Source Code Files with eWASH: Extended Window Access by Syntax Hierarchy},
author={Clement, Colin B and Lu, Shuai and Liu, Xiaoyu and Tufano, Michele and Drain, Dawn and Duan, Nan and Sundaresan, Neel and Svyatkovskiy, Alexey},
journal={arXiv preprint arXiv:2109.08780},
year={2021}
}</code></pre>
|
semeru/code-code-MethodGeneration
|
[
"license:mit",
"region:us"
] |
2023-03-22T03:34:23+00:00
|
{"license": "mit", "Programminglanguage": "python", "version": "N/A", "Date": "Codesearchnet(Jun 2020 - paper release date)", "Contaminated": "Very Likely", "Size": "Standard Tokenizer (TreeSitter)"}
|
2023-03-27T17:16:31+00:00
|
5d15ed5a154afe64eb40d8793bd90bf254f76585
|
KoddaDuck/41_4
|
[
"license:mit",
"region:us"
] |
2023-03-22T03:55:28+00:00
|
{"license": "mit"}
|
2023-03-22T03:55:28+00:00
|
|
7050c3d72eda1097b4ca192b38df7e6ef902db3c
|
Lewislou/cell_samples
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-22T04:28:57+00:00
|
{"license": "apache-2.0"}
|
2023-03-22T04:29:48+00:00
|
|
ea9a5067cdb2fba5a1d49b7392f61b5ae820aec8
|
# Dataset Card for "wikipedia_stage2_coverage_20230316"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
MartinKu/wikipedia_stage2_coverage_20230316
|
[
"region:us"
] |
2023-03-22T05:06:39+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "S_V_position", "sequence": "int64"}, {"name": "O_C_position", "sequence": "int64"}, {"name": "start_point_list", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 58992077079, "num_examples": 1054477}], "download_size": 18229138004, "dataset_size": 58992077079}}
|
2023-03-23T18:28:17+00:00
|
7878f4929cce3aba9ce8371f83b32e0eeb2b83dc
|
# ChatGPT3.5 Noisy Translation Facebook
Notebooks at https://github.com/mesolitica/malaysian-dataset/tree/master/translation/chatgpt3.5-facebook
|
mesolitica/chatgpt-noisy-translation-facebook
|
[
"task_categories:translation",
"language:ms",
"region:us"
] |
2023-03-22T05:10:38+00:00
|
{"language": ["ms"], "task_categories": ["translation"]}
|
2023-12-17T04:06:33+00:00
|
594ef28b50307c40a756df785a72296cf55e4872
|
# ChatGPT3.5 Noisy Translation IIUM Confession
Notebooks at https://github.com/mesolitica/malaysian-dataset/tree/master/translation/chatgpt3.5-iium-confession
|
mesolitica/chatgpt-noisy-translation-iium-confession
|
[
"task_categories:translation",
"language:ms",
"region:us"
] |
2023-03-22T05:11:08+00:00
|
{"language": ["ms"], "task_categories": ["translation"]}
|
2023-12-17T04:07:11+00:00
|
1277505f152d3e19d5b7f0169e28a533d4a6b445
|
# KoddaDuck/Cylonix_ASR_dataset
|
KoddaDuck/Cylonix_ASR_dataset
|
[
"task_categories:automatic-speech-recognition",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:multilingual",
"source_datasets:extended|common_voice",
"license:cc0-1.0",
"region:us"
] |
2023-03-22T06:03:52+00:00
|
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "license": ["cc0-1.0"], "multilinguality": ["multilingual"], "size_categories": {"zh-CN": ["100K<n<10M"]}, "source_datasets": ["extended|common_voice"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "paperswithcode_id": "common-voice", "pretty_name": "Common Voice Corpus 11.0", "language_bcp47": ["zh-CN"], "extra_gated_prompt": "By clicking on \u201cAccess repository\u201d below, you also agree to not attempt to determine the identity of speakers in the Common Voice dataset."}
|
2023-03-27T00:53:16+00:00
|
6399237ddf55ca0e3e83a63b7fa0a71dda7dbeb9
|
NanashinoSeito/tmp
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-22T07:33:52+00:00
|
{"license": "apache-2.0"}
|
2023-03-22T07:35:39+00:00
|
|
001dad396b843fa88b0b1a637e71cdc8206bab09
|
# Dataset Card for "thaigov-v2-corpus-22032023"
This corpus was made from the Thaigov v2 corpus on 22 Mar 2023. [https://github.com/PyThaiNLP/thaigov-v2-corpus/releases/tag/22032023](https://github.com/PyThaiNLP/thaigov-v2-corpus/releases/tag/22032023)
Corpus: [https://github.com/PyThaiNLP/thaigov-v2-corpus](https://github.com/PyThaiNLP/thaigov-v2-corpus)
## English
- Data from the Thai government website: https://www.thaigov.go.th
- This is part of the PyThaiNLP project.
- Compiled by Mr. Wannaphong Phatthiyaphaibun
- License: the dataset is in the public domain.
## Data format
- 1 file contains 1 news item, extracted from 1 URL.
```
topic
(Blank line)
content
content
content
content
content
(Blank line)
ที่มา (URL source) : http://www.thaigov.go.th/news/contents/details/NNN
```
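A minimal parsing sketch, assuming each file follows the layout above exactly (topic, blank line, content lines, blank line, source line):
```python
def parse_news_file(path):
    with open(path, encoding="utf-8") as f:
        lines = [line.rstrip("\n") for line in f]
    topic = lines[0]
    # The last line looks like "ที่มา : http://..."; split on the first colon.
    source_url = lines[-1].split(":", 1)[1].strip()
    content = "\n".join(line for line in lines[1:-1] if line.strip())
    return {"topic": topic, "content": content, "source": source_url}
```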
## Thai
- Data compiled from news on the Thai government website https://www.thaigov.go.th
- This project is part of the [PyThaiNLP](https://github.com/PyThaiNLP/) development plan
- Compiled by Mr. Wannaphong Phatthiyaphaibun
- The data compiled in this corpus is in the public domain under Section 7 of the Copyright Act B.E. 2537 (the following are not considered copyrighted works under this Act: (1) daily news and facts having the character of mere information, which are not works in the fields of literature, science or art [...] (3) regulations, rules, notifications, orders, explanations and official correspondence of ministries, bureaus, departments or any other state or local agencies [...])
**The revision history of this corpus can be tracked through Git**
### Number of news items
- Project start date: 17 Sep 2020
### Data format
- 1 file contains 1 news item, fetched from 1 URL
```
topic
(blank line)
content
content
content
content
content
(blank line)
ที่มา : http://www.thaigov.go.th/news/contents/details/NNN
```
### File naming
- category-name_news-number.txt
### Script
- run.py collects the data from the web, fetching pages from the URL ```http://www.thaigov.go.th/news/contents/details/NNN``` where NNN is an integer
- Change the variable i in the file to the number from which you want to start collecting
- clean.py performs basic data cleaning by removing leading/trailing whitespace on each line and removing blank lines
- ```clean.py filename```
- ```clean.py filename1 filename2```
- ```clean.py *.txt```
We build Thai NLP.
PyThaiNLP
|
pythainlp/thaigov-v2-corpus-22032023
|
[
"size_categories:10K<n<100K",
"language:th",
"license:cc0-1.0",
"region:us"
] |
2023-03-22T07:57:03+00:00
|
{"language": ["th"], "license": "cc0-1.0", "size_categories": ["10K<n<100K"], "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 252319219, "num_examples": 30380}], "download_size": 85313027, "dataset_size": 252319219}}
|
2023-03-22T08:44:49+00:00
|
29cd2c829dd10454e42c0f1fcfc9ef18160891c7
|
Dataset generated using handwritten fonts
=========================================
Number of images: 300000
Sources:
* [Handwriting generation code](https://github.com/NastyBoget/HandwritingGeneration)
The code was executed with the `cyrillic` option (more augmentations)
|
nastyboget/synthetic_cyrillic
|
[
"task_categories:image-to-text",
"size_categories:100K<n<1M",
"language:ru",
"license:mit",
"region:us"
] |
2023-03-22T08:18:18+00:00
|
{"language": ["ru"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["image-to-text"]}
|
2023-03-23T18:45:21+00:00
|
1c24f619ae44d0a4c87fde72efa741dc25f7530e
|
### Dataset Summary
This dataset is based on [CloverSearch/cc-news-mutlilingual](https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual).
We add a script that supports accessing the multilingual CC-News dataset through the HuggingFace datasets API instead of directly downloading the raw data files.
### Data Fields
- `title`: a `string` feature.
- `maintext`: a `string` feature.
- `url`: a `string` feature.
- `date_publish`: a `string` feature.
### How to use this dataset
You can load any subset of CC-News per language:
```python
from datasets import load_dataset
dataset = load_dataset("intfloat/multilingual_cc_news", languages=["af"])
```
## Supported Languages
```
af
als
am
an
ar
arz
as
ast
av
az
azb
ba
bar
bcl
be
bg
bh
bn
bo
bpy
br
bs
bxr
ca
cbk
ce
ceb
ckb
co
cs
cv
cy
da
de
diq
dsb
dty
dv
el
eml
en
eo
es
et
eu
fa
fi
fr
fy
ga
gd
gl
gn
gom
gu
gv
he
hi
hif
hr
hsb
ht
hu
hy
ia
id
ie
ilo
io
is
it
ja
jbo
jv
ka
kk
km
kn
ko
krc
ku
kv
kw
ky
la
lb
lez
li
lmo
lo
lt
lv
mai
mg
mhr
min
mk
ml
mn
mr
mrj
ms
mt
mwl
my
myv
mzn
nah
nap
nds
ne
new
nl
nn
no
oc
or
os
pa
pam
pfl
pl
pms
pnb
ps
pt
qu
rm
ro
ru
sa
sah
sc
scn
sco
sd
sh
si
sk
sl
so
sq
sr
su
sv
sw
ta
te
tg
th
tk
tl
tr
tt
tyv
ug
uk
ur
uz
vec
vep
vi
vls
vo
wa
war
wuu
xal
xmf
yi
yo
yue
zh
```
|
intfloat/multilingual_cc_news
|
[
"size_categories:100M<n<1B",
"language:en",
"language:zh",
"language:fr",
"language:de",
"language:af",
"language:ar",
"region:us"
] |
2023-03-22T08:25:34+00:00
|
{"language": ["en", "zh", "fr", "de", "af", "ar"], "size_categories": ["100M<n<1B"]}
|
2023-04-23T07:19:06+00:00
|
6db8d09e3edeaf2f99dd075a31c3788461adeb59
|
# Dataset Card for "thaigov-v2-corpus-22032023-oa"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
pythainlp/thaigov-v2-corpus-22032023-oa
|
[
"region:us"
] |
2023-03-22T08:33:37+00:00
|
{"dataset_info": {"features": [{"name": "TEXT", "dtype": "string"}, {"name": "SOURCE", "dtype": "string"}, {"name": "url", "struct": [{"name": "url", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 241455880, "num_examples": 30380}], "download_size": 81088077, "dataset_size": 241455880}}
|
2023-03-22T08:35:10+00:00
|
fa09fc9938f43a55c522def5a0ec83ffaa10052f
|
# Dataset Card for [Dataset Name]
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
[More Information Needed]
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
|
albertvillanova/test
|
[
"annotations_creators:expert-generated",
"multilinguality:monolingual",
"language:pl",
"license:mit",
"region:us"
] |
2023-03-22T08:42:24+00:00
|
{"annotations_creators": ["expert-generated"], "language": ["pl"], "license": ["mit"], "multilinguality": ["monolingual"], "dataset_info": [{"config_name": "config", "features": [{"name": "audio_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}]}]}
|
2023-03-21T13:27:40+00:00
|
4a01b0a31b572eada953700280cd53250187de59
|
[행정 문서 대상 기계독해 데이터](https://aihub.or.kr/aihubdata/data/view.do?currMenu=115&topMenu=100&aihubDataSe=realm&dataSetSn=569)
|
wisenut-nlp-team/aihub_mrc_admin
|
[
"task_categories:question-answering",
"task_ids:extractive-qa",
"task_ids:closed-domain-qa",
"annotations_creators:crowdsourced",
"language_creators:found",
"size_categories:10M<n<100M",
"source_datasets:original",
"license:cc-by-4.0",
"mrc",
"region:us"
] |
2023-03-22T09:04:56+00:00
|
{"annotations_creators": ["crowdsourced"], "language_creators": ["found"], "language": [], "license": ["cc-by-4.0"], "multilinguality": [], "size_categories": ["10M<n<100M"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["extractive-qa", "closed-domain-qa"], "pretty_name": "wisenut-nlp-team/aihub_mrc_admin", "dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "doc_id", "dtype": "string"}, {"name": "doc_title", "dtype": "string"}, {"name": "doc_source", "dtype": "string"}, {"name": "doc_published", "dtype": "int64"}, {"name": "doc_class", "struct": [{"name": "class", "dtype": "string"}, {"name": "code", "dtype": "string"}]}, {"name": "created", "dtype": "string"}, {"name": "qa_type", "dtype": "int64"}, {"name": "question_id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "is_impossible", "dtype": "bool"}, {"name": "answers", "struct": [{"name": "answer_start", "dtype": "int64"}, {"name": "clue_start", "dtype": "int64"}, {"name": "clue_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 646034916, "num_examples": 329464}, {"name": "validation", "num_bytes": 80855200, "num_examples": 41182}], "download_size": 243811004, "dataset_size": 726890116}, "tags": ["mrc"]}
|
2023-05-23T23:08:00+00:00
|
857eead1f5b4348f37d32648865fa41abdb8f2a5
|
Skimm3r918/lovetogether
|
[
"license:creativeml-openrail-m",
"region:us"
] |
2023-03-22T09:12:35+00:00
|
{"license": "creativeml-openrail-m"}
|
2023-03-22T09:13:33+00:00
|
|
1eca1fa57e387473361ebe828698fd93ebc06b18
|
# Dataset Card for "instructions-id-small"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-id-small
|
[
"region:us"
] |
2023-03-22T09:28:19+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48844.8, "num_examples": 90}, {"name": "test", "num_bytes": 2713.6, "num_examples": 5}, {"name": "validation", "num_bytes": 2713.6, "num_examples": 5}], "download_size": 36845, "dataset_size": 54272.0}}
|
2023-03-22T09:28:34+00:00
|
a7ff39d2bccb3d0b41f9e15d2e670c7db5790147
|
# Dataset Card for "instructions-id"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-id
|
[
"region:us"
] |
2023-03-22T09:30:59+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35749284.66851785, "num_examples": 85242}, {"name": "test", "num_bytes": 1986211.1657410732, "num_examples": 4736}, {"name": "validation", "num_bytes": 1986211.1657410732, "num_examples": 4736}], "download_size": 21158281, "dataset_size": 39721706.99999999}}
|
2023-03-22T12:47:41+00:00
|
d74da450df1b05fac0440394ac17189c4c5519e0
|
Dataset collected as part of work done for Alvarado Lab Queens College, CUNY (https://www.alvaradolab.com/)
Github: https://www.github.com/QCAlvaradoLab/Color-Study
|
hans0812/fish-composite-segmentation
|
[
"license:afl-3.0",
"region:us"
] |
2023-03-22T09:31:33+00:00
|
{"license": "afl-3.0"}
|
2023-03-25T11:12:23+00:00
|
f28293505a59e1e1c1457f6aa5c3d1fe3b57f8d3
|
MesutUnutur/image-demo
|
[
"task_categories:text-to-image",
"region:us"
] |
2023-03-22T09:36:11+00:00
|
{"task_categories": ["text-to-image"]}
|
2023-03-22T11:01:50+00:00
|
|
9ca3a2b38dd1dd14739a5144b225df0344f1460d
|
# Bigcode PII Training Dataset
## Dataset Description
This is the dataset used for the training of [bigcode-pii-model](https://huggingface.co/bigcode/bigcode-pii-model) (after training on pseudo-labeled data).
It is a concatenation of an early version of [bigcode-pii-dataset](https://huggingface.co/datasets/bigcode/bigcode-pii-dataset) which had fewer samples, and [pii-for-code](https://huggingface.co/datasets/bigcode/pii-for-code-v2)
(a dataset with 400 files we annotated in a previous iteration: MORE INFO TO BE ADDED).
Files with `AMBIGUOUS` and `ID` were excluded. Each PII subtype was remapped to its supertype.
## Statistics
The dataset consists of **11878** files in 31 programming languages. More statistics and information about the original annotated dataset can be found at the dataset card of: [bigcode-pii-dataset](https://huggingface.co/datasets/bigcode/bigcode-pii-dataset).
We provide the training and validation splits we used for the training and evaluation of the [bigcode-pii-model](https://huggingface.co/bigcode/bigcode-pii-model).
Below is the distribution of PII entities in each split.
| Entity type | Train | Validation |
|--------------|-------|------------|
| EMAIL | 4721 | 1742 |
| NAME | 3847 | 1298 |
| IP_ADDRESS | 1941 | 521 |
| USERNAME | 1320 | 346 |
| PASSWORD | 390 | 148 |
| KEY | 171 | 118 |
# How to use
```python
from datasets import load_dataset
ds = load_dataset("bigcode/bigcode-pii-dataset-training")
```
```
DatasetDict({
train: Dataset({
features: ['id', 'content', 'language', 'pii', 'assignment_id'],
num_rows: 7878
})
validation: Dataset({
features: ['id', 'content', 'language', 'pii', 'assignment_id'],
num_rows: 4000
})
})
```
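A minimal masking sketch, assuming each `pii` entry carries character offsets (`start`, `end`) into `content` plus a `tag` label, as in the dataset features. This is only an illustration; for detection on new data, prefer the released [bigcode-pii-model](https://huggingface.co/bigcode/bigcode-pii-model):
```python
def mask_pii(example):
    content = example["content"]
    # Work right-to-left so earlier offsets stay valid while placeholders are spliced in.
    for entity in sorted(example["pii"], key=lambda e: e["start"], reverse=True):
        content = content[: entity["start"]] + f"<{entity['tag']}>" + content[entity["end"] :]
    return {"masked_content": content}

# Using the `ds` loaded above:
masked_train = ds["train"].map(mask_pii)
```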
# Considerations for Using the Data
When using this dataset, please be mindful of the data governance risks that come with handling personally identifiable information (PII). Despite sourcing the data from open, permissive GitHub repositories and having it annotated by fairly paid crowd-workers, it does contain sensitive details such as names, usernames, keys, emails, passwords, and IP addresses. To ensure responsible use for research within the open-source community, access to the dataset will be provided through a gated mechanism.
We expect researchers and developers working with the dataset to adhere to the highest ethical standards and employ robust data protection measures.
To assist users in effectively detecting and masking PII, we've also released a PII model trained on this dataset.
Our goal in providing access to both the dataset and the PII model is to foster the development of privacy-preserving AI technologies while minimizing potential risks related to handling PII.
|
bigcode/bigcode-pii-dataset-training
|
[
"region:us"
] |
2023-03-22T10:30:49+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "content", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "pii", "list": [{"name": "context", "dtype": "string"}, {"name": "end", "dtype": "int64"}, {"name": "start", "dtype": "int64"}, {"name": "tag", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "assignment_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17215712, "num_examples": 7878}, {"name": "validation", "num_bytes": 7302111, "num_examples": 4000}], "download_size": 10754489, "dataset_size": 24517823}, "extra_gated_prompt": "## Terms of Use for the dataset\n\nThis is an annotated dataset for Personal Identifiable Information (PII) in code. We ask that you read and agree to the following Terms of Use before using the dataset and fill this [form](https://docs.google.com/forms/d/e/1FAIpQLSfiWKyBB8-PxOCLo-KMsLlYNyQNJEzxJw0gcUAUHT3UY848qA/viewform):\n1. You agree that you will not use the PII dataset for any purpose other than training or evaluating models for PII removal from datasets.\n2. You agree that you will not share the PII dataset or any modified versions for whatever purpose.\n3. Unless required by applicable law or agreed to in writing, the dataset is provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using the dataset, and assume any risks associated with your exercise of permissions under these Terms of Use.\n4. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATASET OR THE USE OR OTHER DEALINGS IN THE DATASET.", "extra_gated_fields": {"Email": "text", "I have read the License and agree with its terms": "checkbox"}}
|
2023-05-11T11:46:10+00:00
|
b317a0dd49ee1830df790e3bf84d8e3fb9403e82
|
# Dataset Card for "basemath"
The objective of minimath is to train the mathematical capability of language models in a diverse setting.
The dataset is composed of samples drawn from the datasets below:
https://huggingface.co/datasets/math_dataset
https://huggingface.co/datasets/math_qa
https://huggingface.co/datasets/competition_math
https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_math_jsonl
https://huggingface.co/datasets/qwedsacf/grade-school-math-instructions
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
kenhktsui/basemath
|
[
"region:us"
] |
2023-03-22T10:47:42+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "Rationale", "dtype": "string"}, {"name": "annotated_formula", "dtype": "string"}, {"name": "linear_formula", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 57454541, "num_examples": 100000}], "download_size": 28978379, "dataset_size": 57454541}}
|
2023-03-22T10:55:24+00:00
|
f17105a540fde74ff651b6a0690a55ce1cbfcb31
|
Russian translations of Murakami novels, to fine-tune a generative language model. Originally downloaded from the FB2 archive http://flibusta.is/a/8570.
|
vldsavelyev/murakami
|
[
"region:us"
] |
2023-03-22T10:55:51+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18352678, "num_examples": 699}, {"name": "test", "num_bytes": 48421, "num_examples": 1}], "download_size": 7244777, "dataset_size": 18401099}}
|
2023-03-25T12:29:55+00:00
|
9a5fcecac061f17d9ed9e89d3cc7408537260833
|
# Dataset Card for "instructions-all"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-all
|
[
"region:us"
] |
2023-03-22T12:03:36+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 11937376.0, "num_examples": 23011}, {"name": "test", "num_bytes": 11885818.0, "num_examples": 23027}, {"name": "train", "num_bytes": 392052453.0, "num_examples": 751788}], "download_size": 264089102, "dataset_size": 415875647.0}}
|
2023-03-22T16:00:28+00:00
|
b269f8ee652ce5bf75b97c7281c3e7f1fc267eb3
|
michaelthwan/wiki_qa_bart_10000row
|
[
"license:mit",
"region:us"
] |
2023-03-22T12:26:01+00:00
|
{"license": "mit"}
|
2023-03-22T12:26:28+00:00
|
|
0f049bbf7917e7becff175cb30aaba5ebec125fe
|
merror/custom
|
[
"license:other",
"region:us"
] |
2023-03-22T12:53:11+00:00
|
{"license": "other"}
|
2023-03-22T12:53:11+00:00
|
|
179da02c2b40ea1c965798fffa053a2c8dc8c3f2
|
semeru/completeformer_java_data
|
[
"license:cc-by-4.0",
"region:us"
] |
2023-03-22T12:53:35+00:00
|
{"license": "cc-by-4.0"}
|
2023-03-24T21:00:19+00:00
|
|
f7038d331f00bf59950361d415e2c12e1216f613
|
# Dataset Card for "disc_cla_primera-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_primera-2
|
[
"region:us"
] |
2023-03-22T13:20:29+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 32328969, "num_examples": 11713}], "download_size": 16700030, "dataset_size": 32328969}}
|
2023-03-22T13:20:32+00:00
|
5755c44709eb0008f6e85461dd3bb2a1641c4739
|
# Dataset Card for "disc_cla_segunda-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_segunda-2
|
[
"region:us"
] |
2023-03-22T13:20:54+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 16832400, "num_examples": 7327}], "download_size": 8416037, "dataset_size": 16832400}}
|
2023-03-22T13:20:57+00:00
|
9d0aa7abc72e9a1d997ca3bc0158fbe9c44cb2ed
|
# Dataset Card for "disc_cla_tercera-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_tercera-2
|
[
"region:us"
] |
2023-03-22T13:21:10+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 8845905, "num_examples": 4913}], "download_size": 4405442, "dataset_size": 8845905}}
|
2023-03-22T13:21:12+00:00
|
21577af3e9b8cb7cdc906e1ee872749de9b9d08c
|
# Dataset Card for "disc_cla_-cuarta-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_cuarta-2
|
[
"region:us"
] |
2023-03-22T13:29:14+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 8052385, "num_examples": 3349}], "download_size": 4065041, "dataset_size": 8052385}}
|
2023-03-22T13:29:17+00:00
|
b556ca40f395999f250e9a3d69ca85db0329308d
|
# Dataset Card for "disc_cla_quinta-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_quinta-2
|
[
"region:us"
] |
2023-03-22T13:29:37+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 20629809, "num_examples": 7507}], "download_size": 10652869, "dataset_size": 20629809}}
|
2023-03-22T13:29:40+00:00
|
2ee74199b698ebd72d2959642e6e70c0c2355fb5
|
# Dataset Card for "disc_cla_sexta-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_sexta-2
|
[
"region:us"
] |
2023-03-22T13:29:59+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 15176429, "num_examples": 7591}], "download_size": 7564523, "dataset_size": 15176429}}
|
2023-03-22T13:30:02+00:00
|
1e8620869df7451874edd56d70393bea4c648b6f
|
# Dataset Card for "disc_cla_septima-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sleoruiz/disc_cla_septima-2
|
[
"region:us"
] |
2023-03-22T13:30:23+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "comision", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "sequence": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 21725404, "num_examples": 9432}], "download_size": 10861388, "dataset_size": 21725404}}
|
2023-03-22T13:30:26+00:00
|
577cd1ae3d4de3c3115c68f047319b39933792a0
|
# Dataset Card for AIDA CoNLL-YAGO Wikidata
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks](#supported-tasks)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Additional Information](#additional-information)
- [Licensing Information](#licensing-information)
## Dataset Description
- **Repository:** [AIDA CoNLL-YAGO Wikidata repository](https://github.com/cyanic-selkie/aida-conll-yago-wikidata)
### Dataset Summary
The AIDA CoNLL-YAGO Wikidata dataset is the same as the original [AIDA CoNLL-YAGO](https://www.mpi-inf.mpg.de/departments/databases-and-information-systems/research/ambiverse-nlu/aida/downloads) dataset, but with Wikidata QIDs instead of Wikipedia titles as entity identifiers. They are automatically generated (with a few manual corrections) from Wikidata and Wikipedia dumps (March 1, 2023).
The code for generating the dataset can be found [here](https://github.com/cyanic-selkie/aida-conll-yago-wikidata).
### Supported Tasks
- `named-entity-recognition`: The dataset can be used to train a model for Named Entity Recognition.
- `named-entity-linking`: The dataset can be used to train a model for Named Entity Linking.
### Languages
The text in the dataset is in English. The associated BCP-47 code is `en`.
## Dataset Structure
### Data Instances
A typical data point represents a document (news article).
The `text` field contains the original text in an NFC normalized, UTF-8 encoded string.
The `entities` field contains a list of entities, each represented by a struct with an inclusive `start` offset, an exclusive `end` offset, a `tag` field, and nullable `qid`, `pageid`, and `title` fields.
Additionally, each document has a unique `document_id` field.
An example from the AIDA CoNLL-YAGO Wikidata test set looks as follows:
```
{
"document_id": 1214,
"text": "RADIO ROMANIA AFTERNOON HEALINES AT 4 PM . BUCHAREST 1996-12-06 Radio Romania news headlines : * The Democratic Convention signed an agreement on government and parliamentary support with its coalition partners the Social Democratic Union and the Hungarian Democratic Union ( UDMR ) . The ceremony was attended by President Emil Constantinescu . * The three parties in the government coalition have committed themselves to a real reform of Romania 's economy , Constantinescu said after the ceremony . * The UDMR wants to contribute to social reform and economic revival in Romania , union leader Marko Bela said . * The international airport in Timisoara and the domestic airports in Arad , Oradea and Sibiu were closed due to fog . -- Bucharest Newsroom 40-1 3120264",
"entities": [
{
"start": 0,
"end": 13,
"tag": "ORG",
"pageid": null,
"qid": null,
"title": null
},
{
"start": 43,
"end": 52,
"tag": "LOC",
"pageid": 36877,
"qid": 19660,
"title": "Bucharest"
},
{
"start": 64,
"end": 77,
"tag": "ORG",
"pageid": null,
"qid": null,
"title": null
},
{
"start": 101,
"end": 122,
"tag": "MISC",
"pageid": null,
"qid": null,
"title": null
},
{
"start": 215,
"end": 238,
"tag": "ORG",
"pageid": null,
"qid": null,
"title": null
},
{
"start": 247,
"end": 273,
"tag": "ORG",
"pageid": null,
"qid": null,
"title": null
},
{
"start": 276,
"end": 280,
"tag": "ORG",
"pageid": 49749134,
"qid": 266582,
"title": "Democratic_Union_of_Hungarians_in_Romania"
},
{
"start": 324,
"end": 343,
"tag": "PER",
"pageid": 393370,
"qid": 299152,
"title": "Emil_Constantinescu"
},
{
"start": 440,
"end": 447,
"tag": "LOC",
"pageid": 25445,
"qid": 218,
"title": "Romania"
},
{
"start": 461,
"end": 475,
"tag": "PER",
"pageid": 393370,
"qid": 299152,
"title": "Emil_Constantinescu"
},
{
"start": 508,
"end": 512,
"tag": "ORG",
"pageid": 49749134,
"qid": 266582,
"title": "Democratic_Union_of_Hungarians_in_Romania"
},
{
"start": 574,
"end": 581,
"tag": "LOC",
"pageid": 25445,
"qid": 218,
"title": "Romania"
},
{
"start": 597,
"end": 607,
"tag": "PER",
"pageid": 1219345,
"qid": 897108,
"title": "Béla_Markó"
},
{
"start": 646,
"end": 655,
"tag": "LOC",
"pageid": 33693389,
"qid": 83404,
"title": "Timişoara"
},
{
"start": 685,
"end": 689,
"tag": "LOC",
"pageid": 22537901,
"qid": 173591,
"title": "Arad,_Romania"
},
{
"start": 692,
"end": 698,
"tag": "LOC",
"pageid": 2024606,
"qid": 2102332,
"title": "Oradea_International_Airport"
},
{
"start": 703,
"end": 708,
"tag": "LOC",
"pageid": 2384413,
"qid": 946418,
"title": "Sibiu_International_Airport"
},
{
"start": 737,
"end": 755,
"tag": "ORG",
"pageid": null,
"qid": null,
"title": null
}
]
}
```
### Data Fields
- `document_id`: an integer that uniquely identifies the document this sentence belongs to
- `sentence_index`: an integer that uniquely identifies the position of the sentence in its original document
- `text`: an NFC normalized, UTF-8 encoded string representing the sentence
- `entities`: a list of structs representing entities, each entity has:
- `start`: an integer representing the inclusive starting UTF-8 code point of the entity
- `end`: an integer representing the exclusive ending UTF-8 code point of the entity
- `tag`: a string representing the entity type (PER, LOC, ORG or MISC)
- `qid`: an integer representing the Wikidata QID this entity refers to; it can be null if the entity didn't exist in Wikidata at the time of the creation of the original dataset
- `pageid`: an integer representing the English Wikipedia's pageID this entity refers to; it can be null if the entity didn't exist in Wikipedia at the time of the creation of the original dataset
- `title`: an NFC normalized, UTF-8 encoded string representing the English Wikipedia's title this entity refers to; it can be null if the entity didn't exist in Wikipedia at the time of the creation of the original dataset
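Given these fields, a minimal sketch (assuming `start`/`end` index positions in `text` as described above) that recovers each linked mention's surface form and Wikidata QID:
```python
def linked_mentions(example):
    for entity in example["entities"]:
        surface = example["text"][entity["start"]:entity["end"]]
        if entity["qid"] is not None:
            yield surface, entity["tag"], entity["qid"]

# Using the test-set example shown above:
# for surface, tag, qid in linked_mentions(example):
#     print(f"{surface} ({tag}) -> Q{qid}")   # e.g. "BUCHAREST (LOC) -> Q19660"
```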
### Data Splits
The data is split into training, validation and test sets; all of the sentences belonging to an article are in the same split. The final split sizes are as follows:
| | Train | Validation | Test |
| :----- | :------: | :-----: | :----: |
| AIDA CoNLL-YAGO Wikidata - documents | 946 | 216 | 231 |
| AIDA CoNLL-YAGO Wikidata - entities | 23,374 | 5,912 | 5,608 |
| AIDA CoNLL-YAGO Wikidata - entities with QIDs | 18,540 | 4,791 | 4,481 |
## Additional Information
### Licensing Information
The licensing status of the dataset is the same as the licensing status of the original [AIDA CoNLL-YAGO](https://www.mpi-inf.mpg.de/departments/databases-and-information-systems/research/ambiverse-nlu/aida/downloads) dataset which is under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US).
|
cyanic-selkie/aida-conll-yago-wikidata
|
[
"task_categories:token-classification",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-sa-3.0",
"wikidata",
"wikipedia",
"named-entity-recognition",
"named-entity-linking",
"region:us"
] |
2023-03-22T13:30:44+00:00
|
{"language": ["en"], "license": "cc-by-sa-3.0", "size_categories": ["10K<n<100K"], "task_categories": ["token-classification"], "pretty_name": "AIDA CoNLL-YAGO Wikidata", "tags": ["wikidata", "wikipedia", "named-entity-recognition", "named-entity-linking"]}
|
2023-06-28T18:01:17+00:00
|
555684d03d7529ae39c8d748f813ba2ec3f1eada
|
minnq/dataset
|
[
"license:mit",
"region:us"
] |
2023-03-22T13:32:36+00:00
|
{"license": "mit"}
|
2023-03-22T13:32:36+00:00
|
|
6e1a0783042fc8a08f6c600c4eb33350d3efa143
|
Generated from https://huggingface.co/datasets/michaelthwan/oa_wiki_qa_bart_10000row
Code: [open-assistant git](https://github.com/LAION-AI/Open-Assistant/tree/main/data/datasets)
-> data/datasets/bart_searchgpt_wiki_nlp_augment
---
license: mit
---
|
michaelthwan/oa_wiki_qa_bart_10000row
|
[
"region:us"
] |
2023-03-22T13:47:06+00:00
|
{}
|
2023-03-22T13:50:20+00:00
|
62d27031bd7792542a98c86fae59916cbc2be5cc
|
Thewillonline/gpt4
|
[
"language:en",
"region:us"
] |
2023-03-22T13:47:46+00:00
|
{"language": ["en"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22272464157, "num_examples": 536764548}], "download_size": 14337362159, "dataset_size": 22272464157}}
|
2023-03-22T18:37:08+00:00
|
|
5ab231077dca93b4430c7ba98dedca6b82816aa5
|
siemvaessen/iati
|
[
"license:other",
"region:us"
] |
2023-03-22T14:49:48+00:00
|
{"license": "other"}
|
2023-03-23T13:09:31+00:00
|
|
8bc340720aa66759fc2545ba1aee0e43e2517b6a
|
# Dataset Card for Dataset Name
## Dataset Description
- **Homepage:** [https://satinbenchmark.github.io](https://satinbenchmark.github.io)
- **Repository:**
- **Paper:** [SATIN: A Multi-Task Metadataset for Classifying Satellite Imagery using Vision-Language Models](https://arxiv.org/pdf/2304.11619.pdf)
- **Leaderboard:** [SATIN Leaderboard](https://satinbenchmark.github.io/leaderboard.md)
### Dataset Summary
SATIN (SATellite ImageNet) is a metadataset containing 27 constituent satellite and aerial image datasets spanning 6 distinct tasks: Land Cover, Land Use,
Hierarchical Land Use, Complex Scenes, Rare Scenes, and False Colour Scenes. The imagery is globally distributed and comprises resolutions spanning 5 orders
of magnitude, multiple field-of-view sizes, and over 250 distinct class labels. Presented at the ICCV '23 TNGCV Workshop.
## Dataset Structure
The SATIN benchmark is comprised of the following datasets:
#### Task 1: Land Cover
- SAT-4
- SAT-6
- NASC-TG2
#### Task 2: Land Use
- WHU-RS19
- RSSCN7
- RS_C11
- SIRI-WHU
- EuroSAT
- NWPU-RESISC45
- PatternNet
- RSD46-WHU
- GID
- CLRS
- Optimal-31
#### Task 3: Hierarchical Land Use
- Million-AID
- RSI-CB256
#### Task 4: Complex Scenes
- UC_Merced_LandUse_MultiLabel
- MLRSNet
- MultiScene
- AID_MultiLabel
#### Task 5: Rare Scenes
- Airbus-Wind-Turbines-Patches
- USTC_SmokeRS
- Canadian_Cropland
- Ships-In-Satellite-Imagery
- Satellite-Images-of-Hurricane-Damage
#### Task 6: False Colour Scenes
- Brazilian_Coffee_Scenes
- Brazilian_Cerrado-Savanna_Scenes
For ease of use, and to avoid having to download the entire benchmark every time, each of the 27 datasets is included in this repository as a separate
'config'.
### Example Usage
```python
from datasets import load_dataset
hf_dataset = load_dataset('jonathan-roberts1/SATIN', DATASET_NAME, split='train') # for DATASET_NAME use one of the configs listed above (e.g., EuroSAT)
features = hf_dataset.features
class_labels = features['label'].names
#class_labels = features['label'].feature.names # for the Complex Scenes datasets
#class_labels_1 = features['label_1'].names # for the Hierarchical Land Use datasets, the label field is replaced with label_1, label_2, ...
random_index = 5
example = hf_dataset[random_index]
image, label = example['image'], example['label']
```
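For the multi-label and hierarchical configs mentioned in the comments above, label access differs slightly. The sketch below is hedged: the config choices and the exact number of hierarchy levels are assumptions based on the task lists above.
```python
from datasets import load_dataset

# Complex Scenes configs (e.g., MLRSNet) are multi-label: `label` is a sequence of class indices.
multilabel = load_dataset('jonathan-roberts1/SATIN', 'MLRSNet', split='train')
names = multilabel.features['label'].feature.names
print([names[i] for i in multilabel[0]['label']])

# Hierarchical Land Use configs (e.g., Million-AID) expose one field per level: label_1, label_2, ...
hierarchical = load_dataset('jonathan-roberts1/SATIN', 'Million-AID', split='train')
example = hierarchical[0]
print(hierarchical.features['label_1'].names[example['label_1']])
```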
### Data Splits
For each config, there is just the single, default 'train' split.
### Source Data
More information regarding the source data can be found in our paper. Additionally, each of the constituent datasets has been uploaded to HuggingFace datasets.
They can be accessed at: huggingface.co/datasets/jonathan-roberts1/DATASET_NAME.
### Dataset Curators
This dataset was curated by Jonathan Roberts, Kai Han, and Samuel Albanie
### Licensing Information
As SATIN is comprised of existing datasets with differing licenses, there is not a single license for SATIN. All of the datasets in SATIN can be used
for research purposes; usage information of specific constituent datasets can be found in the Appendix of our paper.
### Citation Information
```
@article{roberts2023satin,
title = {SATIN: A Multi-Task Metadataset for Classifying Satellite Imagery using Vision-Language Models},
  author = {Jonathan Roberts and Kai Han and Samuel Albanie},
year = {2023},
eprint = {2304.11619},
archivePrefix= {arXiv},
primaryClass = {cs.CV}
}
```
|
jonathan-roberts1/SATIN
|
[
"task_categories:image-classification",
"task_categories:zero-shot-image-classification",
"size_categories:100K<n<1M",
"language:en",
"license:other",
"arxiv:2304.11619",
"region:us"
] |
2023-03-22T15:10:38+00:00
|
{"language": ["en"], "license": "other", "size_categories": ["100K<n<1M"], "task_categories": ["image-classification", "zero-shot-image-classification"], "pretty_name": "SATellite ImageNet", "configs": [{"config_name": "SAT-4"}, {"config_name": "SAT-6"}, {"config_name": "NASC-TG2"}, {"config_name": "WHU-RS19"}, {"config_name": "RSSCN7"}, {"config_name": "RS_C11"}, {"config_name": "SIRI-WHU"}, {"config_name": "EuroSAT"}, {"config_name": "NWPU-RESISC45"}, {"config_name": "PatternNet"}, {"config_name": "RSD46-WHU"}, {"config_name": "GID"}, {"config_name": "CLRS"}, {"config_name": "Optimal-31"}, {"config_name": "Airbus-Wind-Turbines-Patches"}, {"config_name": "USTC_SmokeRS"}, {"config_name": "Canadian_Cropland"}, {"config_name": "Ships-In-Satellite-Imagery"}, {"config_name": "Satellite-Images-of-Hurricane-Damage"}, {"config_name": "Brazilian_Coffee_Scenes"}, {"config_name": "Brazilian_Cerrado-Savanna_Scenes"}, {"config_name": "Million-AID"}, {"config_name": "UC_Merced_LandUse_MultiLabel"}, {"config_name": "MLRSNet"}, {"config_name": "MultiScene"}, {"config_name": "RSI-CB256"}, {"config_name": "AID_MultiLabel"}]}
|
2023-11-06T10:57:50+00:00
|
50e4e8f8de089bf40950ee043ebff51ad33b3ecb
|
# Dataset Card for Banc Trawsgrifiadau Bangor
This dataset is a bank of 20 hours, 6 minutes and 49 seconds of speech segments from over 50 contributors, in mp3 file format, together with corresponding 'verbatim' transcripts in .tsv file format. The majority of the speech is spontaneous and natural. The dataset was distributed by Canolfan Bedwyr under a CC0 open license. The original dataset can be found here: [link](https://git.techiaith.bangor.ac.uk/data-porth-technolegau-iaith/banc-trawsgrifiadau-bangor).
## Data Fields
`audio_filename` (`string`): The name of the audio file within the 'clips' folder
`audio_filesize` (`int64`): The size of the file
`audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column (`dataset[0]["audio"]`), the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling a large number of audio files can take a significant amount of time, so it is important to query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`.
`transcript` (`string`): The transcript of the audio clip
`duration` (`duration[ms]`): Duration of the clip in milliseconds
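A minimal loading sketch following the recommendation above; the split name and the 16 kHz target rate are assumptions, not part of the card.
```python
from datasets import load_dataset, Audio

dataset = load_dataset("prvInSpace/banc-trawsgrifiadau-bangor", split="train")

# Optionally resample up front, e.g. to the 16 kHz many ASR models expect.
dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

example = dataset[0]            # query the sample index first, then the "audio" column
audio = example["audio"]
print(audio["sampling_rate"], audio["array"].shape, example["transcript"])
```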
## Licensing Information
The dataset was created by Canolfan Bedwyr, partly funded by the Welsh Government, and released under
[Creative Commons Zero v.1.0 Universal](https://git.techiaith.bangor.ac.uk/data-porth-technolegau-iaith/banc-trawsgrifiadau-bangor/-/blob/master/LICENSE)
|
prvInSpace/banc-trawsgrifiadau-bangor
|
[
"task_categories:automatic-speech-recognition",
"size_categories:10K<n<100K",
"language:cy",
"license:cc0-1.0",
"region:us"
] |
2023-03-22T15:11:54+00:00
|
{"language": ["cy"], "license": "cc0-1.0", "size_categories": ["10K<n<100K"], "task_categories": ["automatic-speech-recognition"]}
|
2023-03-22T21:21:53+00:00
|
c34331b5c52bce660d9e5e4227cb96669c9122c4
|
# Dataset Card for Alpaca MT
## Dataset Description
- **Homepage:** https://crfm.stanford.edu/2023/03/13/alpaca.html
- **Repository:** https://github.com/juletx/alpaca-lora-mt
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** Rohan Taori
### Dataset Summary
Alpaca is a dataset of 52,000 instructions and demonstrations generated by OpenAI's `text-davinci-003` engine. This instruction data can be used to conduct instruction-tuning for language models and make them follow instructions better. This dataset also includes machine-translated data for 6 Iberian languages: Portuguese, Spanish, Catalan, Basque, Galician and Asturian. Translation was done using the NLLB-200 3.3B model.
The authors built on the data generation pipeline from [Self-Instruct framework](https://github.com/yizhongw/self-instruct) and made the following modifications:
- The `text-davinci-003` engine to generate the instruction data instead of `davinci`.
- A [new prompt](https://github.com/tatsu-lab/stanford_alpaca/blob/main/prompt.txt) was written that explicitly gave the requirement of instruction generation to `text-davinci-003`.
- Much more aggressive batch decoding was used, i.e., generating 20 instructions at once, which significantly reduced the cost of data generation.
- The data generation pipeline was simplified by discarding the difference between classification and non-classification instructions.
- Only a single instance was generated for each instruction, instead of 2 to 3 instances as in Self-Instruct.
This produced an instruction-following dataset with 52K examples obtained at a much lower cost (less than $500).
In a preliminary study, the authors also found that the 52K generated data to be much more diverse than the data released by [Self-Instruct](https://github.com/yizhongw/self-instruct/blob/main/data/seed_tasks.jsonl).
### Supported Tasks and Leaderboards
The Alpaca dataset is designed for instruction-tuning pretrained language models.
### Languages
The original data in Alpaca is in English (BCP-47 en). We also provide machine-translated data for 6 Iberian languages: Portuguese (BCP-47 pt), Spanish (BCP-47 es), Catalan (BCP-47 ca), Basque (BCP-47 eu), Galician (BCP-47 gl) and Asturian (BCP-47 at).
## Dataset Structure
### Data Instances
An example of "train" looks as follows:
```json
{
"instruction": "Create a classification task by clustering the given list of items.",
"input": "Apples, oranges, bananas, strawberries, pineapples",
"output": "Class 1: Apples, Oranges\nClass 2: Bananas, Strawberries\nClass 3: Pineapples",
"text": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\nCreate a classification task by clustering the given list of items.\n\n### Input:\nApples, oranges, bananas, strawberries, pineapples\n\n### Response:\nClass 1: Apples, Oranges\nClass 2: Bananas, Strawberries\nClass 3: Pineapples",
}
```
### Data Fields
The data fields are as follows:
* `instruction`: describes the task the model should perform. Each of the 52K instructions is unique.
* `input`: optional context or input for the task. For example, when the instruction is "Summarize the following article", the input is the article. Around 40% of the examples have an input.
* `output`: the answer to the instruction as generated by `text-davinci-003`.
* `text`: the `instruction`, `input` and `output` formatted with the [prompt template](https://github.com/tatsu-lab/stanford_alpaca#data-release) used by the authors for fine-tuning their models.
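A minimal sketch for loading one language configuration follows. Note one hedge: the card describes the formatted field as `text`, while the repository metadata names it `prompt`, so check which one your copy exposes.
```python
from datasets import load_dataset

# Any of the configs listed in the splits table below works: en, pt, es, ca, eu, gl, at.
dataset = load_dataset("HiTZ/alpaca_mt", "eu", split="train")

example = dataset[0]
print(example["instruction"])
print(example["input"])   # empty for the ~60% of examples without an input
print(example["output"])
```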
### Data Splits
| | train |
|---------------|------:|
| en | 52002 |
| pt | 52002 |
| es | 52002 |
| ca | 52002 |
| eu | 52002 |
| gl | 52002 |
| at | 52002 |
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
Excerpt from the [blog post](https://crfm.stanford.edu/2023/03/13/alpaca.html) accompanying the release of this dataset:
> We believe that releasing the above assets will enable the academic community to perform controlled scientific studies on instruction-following language models, resulting in better science and ultimately new techniques to address the existing deficiencies with these models. At the same time, any release carries some risk. First, we recognize that releasing our training recipe reveals the feasibility of certain capabilities. On one hand, this enables more people (including bad actors) to create models that could cause harm (either intentionally or not). On the other hand, this awareness might incentivize swift defensive action, especially from the academic community, now empowered by the means to perform deeper safety research on such models. Overall, we believe that the benefits for the research community outweigh the risks of this particular release. Given that we are releasing the training recipe, we believe that releasing the data, model weights, and training code incur minimal further risk, given the simplicity of the recipe. At the same time, releasing these assets has enormous benefits for reproducible science, so that the academic community can use standard datasets, models, and code to perform controlled comparisons and to explore extensions. Deploying an interactive demo for Alpaca also poses potential risks, such as more widely disseminating harmful content and lowering the barrier for spam, fraud, or disinformation. We have put into place two risk mitigation strategies. First, we have implemented a content filter using OpenAI’s content moderation API, which filters out harmful content as defined by OpenAI’s usage policies. Second, we watermark all the model outputs using the method described in Kirchenbauer et al. 2023, so that others can detect (with some probability) whether an output comes from Alpaca 7B. Finally, we have strict terms and conditions for using the demo; it is restricted to non-commercial uses and to uses that follow LLaMA’s license agreement. We understand that these mitigation measures can be circumvented once we release the model weights or if users train their own instruction-following models. However, by installing these mitigations, we hope to advance the best practices and ultimately develop community norms for the responsible deployment of foundation models.
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
The `alpaca` data is generated by a language model (`text-davinci-003`) and inevitably contains some errors or biases. We encourage users to use this data with caution and propose new methods to filter or improve the imperfections.
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
The dataset is available under the [Creative Commons NonCommercial (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/legalcode).
### Citation Information
```
@misc{alpaca,
author = {Rohan Taori and Ishaan Gulrajani and Tianyi Zhang and Yann Dubois and Xuechen Li and Carlos Guestrin and Percy Liang and Tatsunori B. Hashimoto },
title = {Stanford Alpaca: An Instruction-following LLaMA model},
year = {2023},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/tatsu-lab/stanford_alpaca}},
}
```
### Contributions
[More Information Needed]
|
HiTZ/alpaca_mt
|
[
"task_categories:text-generation",
"task_ids:dialogue-modeling",
"annotations_creators:no-annotation",
"language_creators:machine-generated",
"multilinguality:multilingual",
"multilinguality:translation",
"size_categories:10K<n<100K",
"source_datasets:tatsu-lab/alpaca",
"language:en",
"language:pt",
"language:es",
"language:ca",
"language:eu",
"language:gl",
"language:at",
"license:cc-by-nc-4.0",
"instruction-finetuning",
"region:us"
] |
2023-03-22T15:27:30+00:00
|
{"annotations_creators": ["no-annotation"], "language_creators": ["machine-generated"], "language": ["en", "pt", "es", "ca", "eu", "gl", "at"], "license": "cc-by-nc-4.0", "multilinguality": ["multilingual", "translation"], "size_categories": ["10K<n<100K"], "source_datasets": ["tatsu-lab/alpaca"], "task_categories": ["text-generation"], "task_ids": ["dialogue-modeling"], "pretty_name": "Alpaca MT", "tags": ["instruction-finetuning"], "dataset_info": [{"config_name": "en", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32088854, "num_examples": 51942}], "download_size": 22764890, "dataset_size": 32088854}, {"config_name": "pt", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33600380, "num_examples": 51942}], "download_size": 23513483, "dataset_size": 33600380}, {"config_name": "es", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35893136, "num_examples": 51942}], "download_size": 24483751, "dataset_size": 35893136}, {"config_name": "ca", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33938638, "num_examples": 51942}], "download_size": 23096222, "dataset_size": 33938638}, {"config_name": "eu", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 29977672, "num_examples": 51942}], "download_size": 20469814, "dataset_size": 29977672}, {"config_name": "gl", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32736710, "num_examples": 51942}], "download_size": 22356802, "dataset_size": 32736710}, {"config_name": "at", "features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 31487842, "num_examples": 51942}], "download_size": 20688305, "dataset_size": 31487842}]}
|
2023-04-07T14:15:55+00:00
|
d803052c924476522f88e44b8ce1e59d5bd74200
|
# Dataset Card for "robotcombinedroboset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
homangab/robotcombinedroboset
|
[
"region:us"
] |
2023-03-22T15:30:52+00:00
|
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 83858508.0, "num_examples": 190}], "download_size": 10230491, "dataset_size": 83858508.0}}
|
2023-03-22T15:33:33+00:00
|
b3b496e00a52fd99733f378494ef3f6e8a1e8b3c
|
# Dataset Card for "instructions-ar"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-ar
|
[
"region:us"
] |
2023-03-22T15:40:47+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1335708.4343484773, "num_examples": 1802}, {"name": "test", "num_bytes": 74864.90114827758, "num_examples": 101}, {"name": "validation", "num_bytes": 74123.66450324513, "num_examples": 100}], "download_size": 0, "dataset_size": 1484697.0}}
|
2023-03-22T15:42:43+00:00
|
46a61522c7cc8423e7be085a91cca76294e8225d
|
# Dataset Card for "instructions-bg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-bg
|
[
"region:us"
] |
2023-03-22T15:41:55+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1656280.9056415376, "num_examples": 1802}, {"name": "test", "num_bytes": 92832.6145781328, "num_examples": 101}, {"name": "validation", "num_bytes": 91913.47978032951, "num_examples": 100}], "download_size": 862720, "dataset_size": 1841027.0}}
|
2023-03-22T15:42:57+00:00
|
d5d4f5c0fbea37caeb8b7a29dc521caab6cae15a
|
# Dataset Card for "instructions-bn"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-bn
|
[
"region:us"
] |
2023-03-22T15:42:58+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2244818.7, "num_examples": 1791}, {"name": "test", "num_bytes": 125338.84422110552, "num_examples": 100}, {"name": "validation", "num_bytes": 124085.45577889447, "num_examples": 99}], "download_size": 930748, "dataset_size": 2494243.0}}
|
2023-03-22T15:43:17+00:00
|
410dc405ee624cd0f1f6368dc59505451981ac7b
|
# Dataset Card for "instructions-ca"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-ca
|
[
"region:us"
] |
2023-03-22T15:43:18+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 978054.1677483774, "num_examples": 1802}, {"name": "test", "num_bytes": 54818.796305541684, "num_examples": 101}, {"name": "validation", "num_bytes": 54276.035946080876, "num_examples": 100}], "download_size": 641125, "dataset_size": 1087149.0}}
|
2023-03-22T15:43:39+00:00
|
d4da63a894ff15bc597e26fab2c4590d4d44a2fb
|
# Dataset Card for "instructions-el"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-el
|
[
"region:us"
] |
2023-03-22T15:43:40+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1798844.0259610584, "num_examples": 1802}, {"name": "test", "num_bytes": 100823.1113330005, "num_examples": 101}, {"name": "validation", "num_bytes": 99824.8627059411, "num_examples": 100}], "download_size": 945906, "dataset_size": 1999492.0}}
|
2023-03-22T15:43:58+00:00
|
5d225d37c358f1488440d0d2e0c236149299a7a2
|
# Dataset Card for "instructions-et"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-et
|
[
"region:us"
] |
2023-03-22T15:43:59+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 880362.0, "num_examples": 1800}, {"name": "test", "num_bytes": 48909.0, "num_examples": 100}, {"name": "validation", "num_bytes": 48909.0, "num_examples": 100}], "download_size": 622841, "dataset_size": 978180.0}}
|
2023-03-22T15:44:20+00:00
|
d6fdc74817064effba1a10d76b8c00dcb33b6e59
|
# Dataset Card for "instructions-fi"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-fi
|
[
"region:us"
] |
2023-03-22T15:44:21+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 936328.3665338645, "num_examples": 1807}, {"name": "test", "num_bytes": 52334.900398406375, "num_examples": 101}, {"name": "validation", "num_bytes": 51816.73306772908, "num_examples": 100}], "download_size": 640961, "dataset_size": 1040480.0}}
|
2023-03-22T15:44:39+00:00
|
86943d963bac8c6b3be54c3edfa4f7c7d07eb5d5
|
# Dataset Card for "instructions-ht"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-ht
|
[
"region:us"
] |
2023-03-22T15:44:40+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 849311.1, "num_examples": 1800}, {"name": "test", "num_bytes": 47183.95, "num_examples": 100}, {"name": "validation", "num_bytes": 47183.95, "num_examples": 100}], "download_size": 551605, "dataset_size": 943678.9999999999}}
|
2023-03-22T15:44:57+00:00
|
34e7d70744194a78afc60e7b080dffeea52a799e
|
# Dataset Card for "instructions-it"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-it
|
[
"region:us"
] |
2023-03-22T15:44:58+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 978621.3237195425, "num_examples": 1809}, {"name": "test", "num_bytes": 54638.33814022874, "num_examples": 101}, {"name": "validation", "num_bytes": 54638.33814022874, "num_examples": 101}], "download_size": 648736, "dataset_size": 1087898.0}}
|
2023-03-22T15:45:15+00:00
|
1f0406d8bede5d4fb8ae46f6d1381e694b822ea1
|
# Dataset Card for "instructions-ko"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-ko
|
[
"region:us"
] |
2023-03-22T15:45:16+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1016857.3512963904, "num_examples": 1770}, {"name": "test", "num_bytes": 56875.07219115404, "num_examples": 99}, {"name": "validation", "num_bytes": 56300.57651245552, "num_examples": 98}], "download_size": 631602, "dataset_size": 1130033.0}}
|
2023-03-22T15:45:33+00:00
|
a5d60ae36e0c478f652ae4158482b3b0088ceadc
|
# Dataset Card for "instructions-sw"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-sw
|
[
"region:us"
] |
2023-03-22T15:45:33+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 917585.3073463269, "num_examples": 1800}, {"name": "test", "num_bytes": 51486.73113443278, "num_examples": 101}, {"name": "validation", "num_bytes": 50976.96151924038, "num_examples": 100}], "download_size": 581487, "dataset_size": 1020049.0000000001}}
|
2023-03-22T15:45:51+00:00
|
cdb1c6073c5a7f9eb03b0a91cd965a097f72bfd9
|
# Dataset Card for "instructions-ta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-ta
|
[
"region:us"
] |
2023-03-22T15:45:51+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2612517.3091275846, "num_examples": 1784}, {"name": "test", "num_bytes": 146441.55320221887, "num_examples": 100}, {"name": "validation", "num_bytes": 144977.13767019668, "num_examples": 99}], "download_size": 1024957, "dataset_size": 2903936.0}}
|
2023-03-22T15:46:11+00:00
|
5c841a7db791e37f22c463c17ed47a380dfeb6e2
|
# Dataset Card for "instructions-tr"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cahya/instructions-tr
|
[
"region:us"
] |
2023-03-22T15:46:11+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 972793.8509566969, "num_examples": 1787}, {"name": "test", "num_bytes": 54437.26082578046, "num_examples": 100}, {"name": "validation", "num_bytes": 53892.88821752266, "num_examples": 99}], "download_size": 631831, "dataset_size": 1081124.0}}
|
2023-03-22T15:46:29+00:00
|