sha: stringlengths 40-40
text: stringlengths 0-13.4M
id: stringlengths 2-117
tags: list
created_at: stringlengths 25-25
metadata: stringlengths 2-31.7M
last_modified: stringlengths 25-25
6713a5d254387ba2a163a3fea6b353d4eb1849e5
# zh-tw-pythia-ta8000-v1-e1-tr_sg-301-c1024-sbldt5 This dataset is a part of the `zh-tw-llm` project. * Tokenizer: `zh-tw-pythia-tokenizer-a8000-v1` * Built with: `sharegpt` * Rows: `train` `5950`, `test` `95` * Max length: `1024` * Full config: ```json {"build_with": ["sharegpt"], "preview_length": 128, "sort_by": "length-desc", "translations_settings": {"source_dataset": "zetavg/coct-en-zh-tw-translations-twp-300k", "lang_1_key": "en", "lang_2_key": "ch", "templates": ["English: {lang_1}\nChinese: {lang_2}", "Chinese: {lang_2}\nEnglish: {lang_1}"], "use_template": "random", "rows_limit": 300000, "test_size": 100, "test_split_seed": 42}, "sharegpt_settings": {"source_dataset": "zetavg/ShareGPT-Processed", "train_on_inputs": false, "languages": [{"en": 0.4}, "zh_Hant"], "rows_limit": 8000, "test_size": 0.02, "test_split_seed": 42, "test_rows_limit": 100}} ```
zh-tw-llm-dv/zh-tw-pythia-ta8000-v1-e1-tr_sg-301-c1024-sbldt5
[ "region:us" ]
2023-05-19T19:31:34+00:00
{"dataset_info": {"dataset_size": 52277349.68449913, "download_size": 14950589, "features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}, {"dtype": "string", "name": "preview"}, {"dtype": "int64", "name": "length"}, {"dtype": "int64", "name": "messages_count"}], "splits": [{"name": "train", "num_bytes": 51379878.03449913, "num_examples": 5950}, {"name": "test", "num_bytes": 897471.65, "num_examples": 95}]}}
2023-05-19T19:32:02+00:00
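The card above lists pre-tokenized features (`input_ids`, `attention_mask`, `labels`) alongside a human-readable `preview` column. A minimal loading sketch, assuming the repo is publicly accessible on the Hugging Face Hub and the standard `datasets` library is installed:

```python
# Sketch only: load the pre-tokenized splits and inspect one training row.
from datasets import load_dataset

ds = load_dataset("zh-tw-llm-dv/zh-tw-pythia-ta8000-v1-e1-tr_sg-301-c1024-sbldt5")

row = ds["train"][0]
print(row["preview"])          # human-readable preview of the example
print(row["length"])           # token count, bounded by the configured max length of 1024
print(len(row["input_ids"]))   # ids ready for causal-LM training with the a8000 tokenizer
```

The `train`/`test` splits loaded this way should match the row counts reported in the card (5950 and 95).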
48a68ccf4ddee2b89e06192abc4bf18948a0f8d4
# Dataset Card for "chunk_77" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_77
[ "region:us" ]
2023-05-19T19:42:38+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1299473308, "num_examples": 255199}], "download_size": 1327008744, "dataset_size": 1299473308}}
2023-05-19T19:47:47+00:00
a49c0e673f3555547d2e8a1d91f8d2d8c1d6b2e5
# Dataset Card for "chunk_79" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_79
[ "region:us" ]
2023-05-19T19:42:49+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1293566588, "num_examples": 254039}], "download_size": 1321364148, "dataset_size": 1293566588}}
2023-05-19T19:45:15+00:00
e765e453efb314c77bb3206a0c7bd58c01a7b16b
# zh-tw-pythia-ta8000-v1-e1-tr_sg-302-c1024 This dataset is a part of the `zh-tw-llm` project. * Tokenizer: `zh-tw-pythia-tokenizer-a8000-v1` * Built with: `translations`, `sharegpt` * Rows: `train` `305958`, `test` `195` * Max length: `1024` * Full config: ```json {"build_with": ["translations", "sharegpt"], "preview_length": 128, "translations_settings": {"source_dataset": "zetavg/coct-en-zh-tw-translations-twp-300k", "lang_1_key": "en", "lang_2_key": "ch", "templates": ["English: {lang_1}\nChinese: {lang_2}", "Chinese: {lang_2}\nEnglish: {lang_1}"], "use_template": "random", "rows_limit": 300000, "test_size": 100, "test_split_seed": 42}, "sharegpt_settings": {"source_dataset": "zetavg/ShareGPT-Processed", "train_on_inputs": false, "languages": [{"en": 0.4}, "zh_Hant"], "rows_limit": 8000, "test_size": 0.02, "test_split_seed": 42, "test_rows_limit": 100}} ```
zh-tw-llm-dv/zh-tw-pythia-ta8000-v1-e1-tr_sg-302-c1024
[ "region:us" ]
2023-05-19T19:47:29+00:00
{"dataset_info": {"dataset_size": 447310289.2648228, "download_size": 178540815, "features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}, {"dtype": "string", "name": "preview"}, {"dtype": "int64", "name": "length"}, {"dtype": "int64", "name": "messages_count"}], "splits": [{"name": "train", "num_bytes": 446343976.5148228, "num_examples": 305958}, {"name": "test", "num_bytes": 966312.75, "num_examples": 195}]}}
2023-05-19T19:49:08+00:00
991d99267f6a921869cc199c9ae01b22e5366c88
# Dataset Card for "chunk_75" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_75
[ "region:us" ]
2023-05-19T19:56:05+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1235283556, "num_examples": 242593}], "download_size": 1259370393, "dataset_size": 1235283556}}
2023-05-19T19:58:24+00:00
f3ccde8eec7b4c23c354ebbdb64c9441b0e6f3b4
# zh-tw-pythia-ta8000-v1-e1-tr_sg-201-c1024 This dataset is a part of the `zh-tw-llm` project. * Tokenizer: `zh-tw-pythia-tokenizer-a8000-v1` * Built with: `translations`, `sharegpt` * Rows: `train` `205965`, `test` `195` * Max length: `1024` * Full config: ```json {"build_with": ["translations", "sharegpt"], "preview_length": 128, "translations_settings": {"source_dataset": "zetavg/coct-en-zh-tw-translations-twp-300k", "lang_1_key": "en", "lang_2_key": "ch", "templates": ["English: {lang_1}\nChinese: {lang_2}", "Chinese: {lang_2}\nEnglish: {lang_1}"], "use_template": "random", "rows_limit": 200000, "test_size": 100, "test_split_seed": 42}, "sharegpt_settings": {"source_dataset": "zetavg/ShareGPT-Processed", "train_on_inputs": false, "languages": [{"en": 0.4}, "zh_Hant"], "rows_limit": 8000, "test_size": 0.02, "test_split_seed": 42, "test_rows_limit": 100}} ```
zh-tw-llm-dv/zh-tw-pythia-ta8000-v1-e1-tr_sg-201-c1024
[ "region:us" ]
2023-05-19T20:20:10+00:00
{"dataset_info": {"dataset_size": 315912726.185796, "download_size": 124158066, "features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}, {"dtype": "string", "name": "preview"}, {"dtype": "int64", "name": "length"}, {"dtype": "int64", "name": "messages_count"}], "splits": [{"name": "train", "num_bytes": 314946413.435796, "num_examples": 205965}, {"name": "test", "num_bytes": 966312.75, "num_examples": 195}]}}
2023-05-19T20:21:28+00:00
db8f33c92599de69ee98069e17b216dd5e911439
# Dataset Card for "IAM_Sentences_LLaVA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alpayariyak/IAM_Sentences_LLaVA
[ "region:us" ]
2023-05-19T20:46:41+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "string"}, {"name": "conversations", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1053875995.077, "num_examples": 5663}], "download_size": 1128902513, "dataset_size": 1053875995.077}}
2023-05-19T21:04:20+00:00
58ef5283790470ad0c73dd4e577cefcb0c75befc
### Dataset Summary This dataset consists of news articles in the Luganda language. A title is also given for each article. The dataset can be used to fine-tune a Luganda news article generator. ### Languages Luganda. Luganda is the most widely spoken indigenous language in Uganda. ### Source Data The articles were sourced from various online Luganda news websites such as Bukedde.
simonry14/luganda-news-articles
[ "license:mit", "region:us" ]
2023-05-19T21:16:01+00:00
{"license": "mit"}
2023-06-02T16:28:15+00:00
bbff2711295cb79309ec32b1e403af8645669a9d
**Kotone Shirakawa from Overflow (hentai anime)** - *Trained with the anime (full-final-pruned) model.* - *Best results with ALL and OUTALL LoRA weight blocks, and with 0.4 to 0.7 weights.* - *5 versions; 6, 7, 8, 9, and 10 epochs.*
Cheetor1996/Kotone_Shirakawa
[ "language:en", "license:cc-by-2.0", "art", "region:us" ]
2023-05-19T21:31:16+00:00
{"language": ["en"], "license": "cc-by-2.0", "pretty_name": "Kotone Shirakawa", "tags": ["art"]}
2023-05-19T22:03:41+00:00
864f38d79a8974056c98e04694df0ea042a525e4
andyh28/testing
[ "license:afl-3.0", "region:us" ]
2023-05-19T21:44:01+00:00
{"license": "afl-3.0"}
2023-05-19T21:44:01+00:00
2fc6f8e1643e6f0e2438803514a7c1b4f682f014
# Dataset Card for "chunk_84" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_84
[ "region:us" ]
2023-05-19T21:44:51+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1112428872, "num_examples": 218466}], "download_size": 1133718660, "dataset_size": 1112428872}}
2023-05-19T21:47:01+00:00
fa192bb26116473781cc1313fa0d586589bd79b6
# Dataset Card for "chunk_80" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_80
[ "region:us" ]
2023-05-19T21:49:12+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1280276468, "num_examples": 251429}], "download_size": 1307696619, "dataset_size": 1280276468}}
2023-05-19T21:50:07+00:00
f0e8b171d554d3111cc26594c8a877681ff2d653
# Dataset Card for "chunk_83" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_83
[ "region:us" ]
2023-05-19T21:50:23+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1301973480, "num_examples": 255690}], "download_size": 1325402800, "dataset_size": 1301973480}}
2023-05-19T21:51:18+00:00
08dba98d358b82a018d62ae4245d914994e7c8ed
# Dataset Card for "chunk_88" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_88
[ "region:us" ]
2023-05-19T21:50:45+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1326659496, "num_examples": 260538}], "download_size": 1352975176, "dataset_size": 1326659496}}
2023-05-19T21:51:27+00:00
10d4b60d70358acbb5881369b1e0b273273a0a7e
# Dataset Card for "chunk_82" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_82
[ "region:us" ]
2023-05-19T21:51:16+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1355276536, "num_examples": 266158}], "download_size": 1381999770, "dataset_size": 1355276536}}
2023-05-19T21:52:03+00:00
5192c01632a304d02577c1653718bf0eb938016b
# Dataset Card for "chunk_86" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_86
[ "region:us" ]
2023-05-19T21:53:26+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1377070296, "num_examples": 270438}], "download_size": 1404210357, "dataset_size": 1377070296}}
2023-05-19T21:54:15+00:00
2be877071b660c54b76aaf367de7fa89f48ddda6
# Dataset Card for "0c5dfa67" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/0c5dfa67
[ "region:us" ]
2023-05-19T21:55:15+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 186, "num_examples": 10}], "download_size": 1337, "dataset_size": 186}}
2023-05-19T21:55:16+00:00
9e1dcf60caed8caa5011195e709a90a19c80715b
# Dataset Card for "chunk_87" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_87
[ "region:us" ]
2023-05-19T21:56:15+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1346207684, "num_examples": 264377}], "download_size": 1372849494, "dataset_size": 1346207684}}
2023-05-19T21:57:26+00:00
2a545501caafa31f901fb080e89f6d88e34a45a7
# Dataset Card for "chunk_89" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_89
[ "region:us" ]
2023-05-19T22:08:23+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1309647124, "num_examples": 257197}], "download_size": 1335432864, "dataset_size": 1309647124}}
2023-05-19T22:13:43+00:00
6d0fc3dd5b47de894607c1579164912a69c5352a
# Dataset Card for "chunk_85" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_85
[ "region:us" ]
2023-05-19T22:18:33+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1436427740, "num_examples": 282095}], "download_size": 1464759870, "dataset_size": 1436427740}}
2023-05-19T22:20:50+00:00
1f68f17b86d844831291f63fae3c199a81ade623
# Dataset Card for "chunk_81" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_81
[ "region:us" ]
2023-05-19T22:19:57+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1484144872, "num_examples": 291466}], "download_size": 1500783089, "dataset_size": 1484144872}}
2023-05-19T22:22:40+00:00
bc798098656dbfe82736467814b7f45b66e1280b
KRJudge/TestKOJO
[ "license:unknown", "doi:10.57967/hf/0671", "region:us" ]
2023-05-20T01:03:10+00:00
{"license": "unknown"}
2023-05-20T01:31:04+00:00
9cdafe8c711dc2f105c85d79b2e5a634961478cb
# Dataset Card for "sharegpt-alpaca-unfiltered-94k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dampish/sharegpt-alpaca-unfiltered-94k
[ "region:us" ]
2023-05-20T01:14:44+00:00
{"dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 629328864, "num_examples": 94145}], "download_size": 263823762, "dataset_size": 629328864}}
2023-05-20T01:15:04+00:00
98d555d6917764af90b579db2c99f8026f8c2efa
# Dataset Card for "weibo_ner_knowledge_V3_wc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
doushabao4766/weibo_ner_knowledge_V3_wc
[ "region:us" ]
2023-05-20T01:21:48+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "B-GPE.NAM", "1": "B-GPE.NOM", "2": "B-LOC.NAM", "3": "B-LOC.NOM", "4": "B-ORG.NAM", "5": "B-ORG.NOM", "6": "B-PER.NAM", "7": "B-PER.NOM", "8": "I-GPE.NAM", "9": "I-GPE.NOM", "10": "I-LOC.NAM", "11": "I-LOC.NOM", "12": "I-ORG.NAM", "13": "I-ORG.NOM", "14": "I-PER.NAM", "15": "I-PER.NOM", "16": "O"}}}}, {"name": "knowledge", "dtype": "string"}, {"name": "token_words", "sequence": {"sequence": "string"}}, {"name": "knowledge_words", "sequence": {"sequence": "string"}}], "splits": [{"name": "train", "num_bytes": 7027512, "num_examples": 1350}, {"name": "validation", "num_bytes": 1116528, "num_examples": 270}, {"name": "test", "num_bytes": 1107689, "num_examples": 270}], "download_size": 2405285, "dataset_size": 9251729}}
2023-05-20T01:21:51+00:00
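The `class_label` schema in the metadata above maps integer `ner_tags` to BIO-style label names. A sketch of decoding them back to strings, assuming the repo is publicly loadable; `int2str` is the standard `datasets.ClassLabel` helper:

```python
# Sketch only: decode integer NER tags into the label names declared above.
from datasets import load_dataset

ds = load_dataset("doushabao4766/weibo_ner_knowledge_V3_wc", split="train")

tag_feature = ds.features["ner_tags"].feature  # ClassLabel: "B-GPE.NAM", ..., "O"
example = ds[0]
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(token, tag_feature.int2str(tag_id))
```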
9ff8558f64727c5d42cb641458928cd8e2c3efdf
# Dataset Card for "viet_youtube_asr_corpus_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/viet_youtube_asr_corpus_v2
[ "region:us" ]
2023-05-20T01:33:58+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "w2v2_transcription", "dtype": "string"}, {"name": "WER", "dtype": "int64"}, {"name": "sum", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 16003127322.525991, "num_examples": 194747}, {"name": "test", "num_bytes": 1778161779.8073397, "num_examples": 21639}], "download_size": 17552533348, "dataset_size": 17781289102.333332}}
2023-05-20T02:34:49+00:00
e121aed36d8501ba8cbd53b4992c246c2625d6be
# A Named Entity Recognition Dataset for Kazakh - This is a modified version of the dataset provided in the [LREC 2022](https://lrec2022.lrec-conf.org/en/) paper [*KazNERD: Kazakh Named Entity Recognition Dataset*](https://aclanthology.org/2022.lrec-1.44). - The original repository for the paper can be found at *https://github.com/IS2AI/KazNERD*. - Tokens denoting speech disfluencies and hesitations (parenthesised) and background noise [bracketed] were removed. - A total of 2,027 duplicate sentences were removed. ### Statistics for training (Train), validation (Valid), and test (Test) sets | Unit | Train | Valid | Test | Total | | :---: | :---: | :---: | :---: | :---: | | Sentence | 88,540 (80.00%) | 11,067 (10.00%) | 11,068 (10.00%) | 110,675 (100%) | | Token | 1,088,461 (80.04%) | 136,021 (10.00%) | 135,426 (9.96%) | 1,359,908 (100%) | | NE | 106,148 (80.17%) | 13,189 (9.96%) | 13,072 (9.87%) | 132,409 (100%) | ### 80 / 10 / 10 split |Representation| Train | Valid | Test | Total | | :---: | :---: | :---: | :---: | :---: | | **AID** | 67,582 (79.99%) | 8,439 (9.99%) | 8,467 (10.02%)| 84,488 (100%) | | **BID** | 19,006 (80.11%) | 2,380 (10.03%) | 2,338 (9.85%)| 23,724 (100%) | | **CID** | 1,050 (78.89%) | 138 (10.37%) | 143 ( 10.74%) | 1,331 (100%) | | **DID** | 633 (79.22%) | 82 (10.26%) | 84 (10.51%) | 799 (100%) | | **EID** | 260 (81.00%) | 27 (8.41%) | 34 (10.59%)| 321 (100%) | | **FID** | 9 (75.00%) | 1 (8.33%)| 2 (16.67%)| 12 (100%) | |**Total**| **88,540 (80.00%)** | **11,067 (10.00%)** | **11,068 (10.00%)** | **110,675 (100%)** | ### Distribution of representations across sets |Representation| Train | Valid | Test | Total | | :---: | :---: | :---: | :---: | :---: | | **AID** | 67,582 (76.33%) | 8,439 (76.25%) | 8,467 (76.50%)| 84,488 (76.34%) | | **BID** | 19,006 (21.47%) | 2,380 (21.51%) | 2,338 (21.12%)| 23,724 (21.44%) | | **CID** | 1,050 (1.19%) | 138 (1.25%) | 143 ( 1.29%) | 1,331 (1.20%) | | **DID** | 633 (0.71%) | 82 (0.74%) | 84 (0.76%) | 799 (0.72%) | | **EID** | 260 (0.29%) | 27 (0.24%) | 34 (0.31%)| 321 (0.29%) | | **FID** | 9 (0.01%) | 1 (0.01%)| 2 (0.02%)| 12 (0.01%) | |**Total**| **88,540 (100.00%)** | **11,067 (10.00%)** | **11,068 (10.00%)** | **110,675 (100%)** | ### Distribution of NEs across sets | **NE Class** | **Train** | **Valid** | **Test** | **Total** | |:---:| :---: | :---: | :---: | :---: | | **ADAGE** | 153 (0.14%) | 19 (0.14%) | 17 (0.13%) | 189 (0.14%) | | **ART** | 1,533 (1.44%) | 155 (1.18%) | 161 (1.23%) | 1,849 (1.40%) | | **CARDINAL** | 23,135 (21.8%) | 2,878 (21.82%) | 2,789 (21.34%) | 28,802 (21.75%) | | **CONTACT** | 159 (0.15%) | 18 (0.14%) | 20 (0.15%) | 197 (0.15%) | | **DATE** | 20,006 (18.85%) | 2,603 (19.74%) | 2,584 (19.77%) | 25,193 (19.03%) | | **DISEASE** | 1,022 (0.96%) | 121 (0.92%) | 119 (0.91%) | 1,262 (0.95%) | | **EVENT** | 1,331 (1.25%) | 154 (1.17%) | 154 (1.18%) | 1,639 (1.24%) | | **FACILITY** | 1,723 (1.62%) | 178 (1.35%) | 197 (1.51%) | 2,098 (1.58%) | | **GPE** | 13,625 (12.84%) | 1,656 (12.56%) | 1,691 (12.94%) | 16,972 (12.82%) | | **LANGUAGE** | 350 (0.33%) | 47 (0.36%) | 41 (0.31%) | 438 (0.33%) | | **LAW** | 419 (0.39%) | 56 (0.42%) | 55 (0.42%) | 530 (0.40%) | | **LOCATION** | 1,736 (1.64%) | 210 (1.59%) | 208 (1.59%) | 2,154 (1.63%) | | **MISCELLANEOUS** | 191 (0.18%) | 26 (0.2%) | 26 (0.2%) | 243 (0.18%) | | **MONEY** | 3,652 (3.44%) | 455 (3.45%) | 427 (3.27%) | 4,534 (3.42%) | | **NON_HUMAN** | 6 (0.01%) | 1 (0.01%) | 1 (0.01%) | 8 (0.01%) | | **NORP** | 2,929 (2.76%) | 374 (2.84%) | 368 (2.82%) | 3,671 (2.77%) 
| | **ORDINAL** | 3,054 (2.88%) | 385 (2.92%) | 382 (2.92%) | 3,821 (2.89%) | | **ORGANISATION** | 5,956 (5.61%) | 753 (5.71%) | 718 (5.49%) | 7,427 (5.61%) | | **PERCENTAGE** | 3,357 (3.16%) | 437 (3.31%) | 462 (3.53%) | 4,256 (3.21%) | | **PERSON** | 9,817 (9.25%) | 1,175 (8.91%) | 1,151 (8.81%) | 12,143 (9.17%) | | **POSITION** | 4,844 (4.56%) | 587 (4.45%) | 597 (4.57%) | 6,028 (4.55%) | | **PRODUCT** | 586 (0.55%) | 73 (0.55%) | 75 (0.57%) | 734 (0.55%) | | **PROJECT** | 1,681 (1.58%) | 209 (1.58%) | 206 (1.58%) | 2,096 (1.58%) | | **QUANTITY** | 3,063 (2.89%) | 411 (3.12%) | 403 (3.08%) | 3,877 (2.93%) | | **TIME** | 1,820 (1.71%) | 208 (1.58%) | 220 (1.68%) | 2,248 (1.70%) | | **Total** | **106,148 (100%)** | **13,189 (100%)** | **13,072 (100%)** | **132,409 (100%)** |
yeshpanovrustem/ner-kazakh
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:kk", "license:cc-by-4.0", "region:us" ]
2023-05-20T01:58:58+00:00
{"language": ["kk"], "license": "cc-by-4.0", "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "A Named Entity Recognition Dataset for Kazakh"}
2023-05-28T06:57:06+00:00
5937f91251ec335f8f76fefa3c0914074544fa72
aiensured/textpipeline
[ "license:openrail", "region:us" ]
2023-05-20T02:05:47+00:00
{"license": "openrail"}
2023-05-20T02:07:47+00:00
9be136cf7fbda80e27f98d1d1db1d9aa53bdff1c
# Dataset Card for "Stellar5-1200" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dampish/Stellar5-1200
[ "region:us" ]
2023-05-20T02:12:11+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 99751167, "num_examples": 116071}], "download_size": 48435050, "dataset_size": 99751167}}
2023-05-20T02:12:19+00:00
ed90658752ff5fa8155dc925570a627577e21ed5
# Dataset Card for "StellarX-FULL" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dampish/Stellar-chat-full
[ "region:us" ]
2023-05-20T02:27:34+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1557748531, "num_examples": 549148}], "download_size": 555047365, "dataset_size": 1557748531}}
2023-05-20T02:28:13+00:00
10f6b16a91c2663acef21e6f701801393c68c2eb
cognizedeepak/CognizeDeepak
[ "license:other", "region:us" ]
2023-05-20T03:41:05+00:00
{"license": "other"}
2023-05-20T03:41:05+00:00
98ba5e1cd9f9603eb9860e9409deaffd34829530
# Dataset Card for "instructional_code-search-net-python" ## Dataset Description - **Homepage:** None - **Repository:** https://huggingface.co/datasets/Nan-Do/instructional_code-search-net-python - **Paper:** None - **Leaderboard:** None - **Point of Contact:** [@Nan-Do](https://github.com/Nan-Do) ### Dataset Summary This is an instructional dataset for Python. The dataset contains two different kinds of tasks: - Given a piece of code, generate a description of what it does. - Given a description, generate a piece of code that fulfils the description. ### Languages The dataset is in English. ### Data Splits There are no splits. ## Dataset Creation May of 2023 ### Curation Rationale This dataset was created to improve the coding capabilities of LLMs. ### Source Data The summarized version of the code-search-net dataset can be found at https://huggingface.co/datasets/Nan-Do/code-search-net-python ### Annotations The dataset includes instruction and response columns. #### Annotation process The annotation procedure was done using templates and NLP techniques to generate human-like instructions and responses. A sample notebook of the process can be found at https://github.com/Nan-Do/OpenAssistantInstructionResponsePython The annotations have been cleaned to make sure there are no repetitions and/or meaningless summaries. ### Licensing Information Apache 2.0
Nan-Do/instructional_code-search-net-python
[ "task_categories:conversational", "task_categories:text-generation", "task_categories:text2text-generation", "language:en", "license:apache-2.0", "Python", "Code generation", "Instruction Response", "region:us" ]
2023-05-20T03:50:17+00:00
{"language": ["en"], "license": "apache-2.0", "task_categories": ["conversational", "text-generation", "text2text-generation"], "pretty_name": "Instructional Python Dataset", "dataset_info": {"features": [{"name": "INSTRUCTION", "dtype": "string"}, {"name": "RESPONSE", "dtype": "string"}, {"name": "SOURCE", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 451473573, "num_examples": 418545}], "download_size": 172777462, "dataset_size": 451473573}, "tags": ["Python", "Code generation", "Instruction Response"]}
2023-05-20T04:09:44+00:00
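Since the card describes paired code-description tasks stored in `INSTRUCTION`/`RESPONSE`/`SOURCE` columns, a short inspection sketch may help; it assumes the repo is publicly accessible on the Hub:

```python
# Sketch only: pull one instruction/response pair from the train split.
from datasets import load_dataset

ds = load_dataset("Nan-Do/instructional_code-search-net-python", split="train")

example = ds[0]
print(example["INSTRUCTION"])  # either "describe this code" or "write code for this description"
print(example["RESPONSE"])     # the paired description or Python snippet
print(example["SOURCE"])       # provenance column listed in the dataset features
```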
ac302b2bee9dee345fe28cf09771f8be7c527457
chao1224/ChatDrug_data
[ "license:afl-3.0", "region:us" ]
2023-05-20T04:19:02+00:00
{"license": "afl-3.0"}
2023-05-20T04:25:14+00:00
d8775b32cc5736cc3a4172a04b7a8f9f0e11ed7d
# Dataset Card for "flores200_devtest_mt5-1b-flores200-baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hlillemark/flores200_devtest_mt5-1b-flores200-baseline
[ "region:us" ]
2023-05-20T04:35:54+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int32"}, {"name": "source_lang", "dtype": "string"}, {"name": "target_lang", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "target", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "chrf_unreduced", "dtype": "string"}], "splits": [{"name": "devtest", "num_bytes": 372389754, "num_examples": 500000}], "download_size": 252368442, "dataset_size": 372389754}}
2023-05-20T17:46:08+00:00
74be92e99e865edf44b0f7873f4242ba7162b684
Travad98/sogc-trademarks-1883-2001
[ "task_categories:image-to-text", "size_categories:1K<n<10K", "economics", "legal", "region:us" ]
2023-05-20T05:33:26+00:00
{"size_categories": ["1K<n<10K"], "task_categories": ["image-to-text"], "pretty_name": "t", "tags": ["economics", "legal"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 815537652.616, "num_examples": 3003}], "download_size": 814717080, "dataset_size": 815537652.616}}
2023-05-20T06:24:34+00:00
12ff1d7fce573fdd8d13bda402e51b3e3ad2d904
This is a test of how to advertise a dataset that we host on Synapse, built off of this [example](https://huggingface.co/datasets/katielink/synapse_test_dataset). To download files in this [dataset](https://www.synapse.org/#!Synapse:syn51520471/files/) you must [register](https://help.synapse.org/docs/Managing-Your-Account.2055405596.html) for a Synapse account. After registering for an account, please [create a personal access token](https://help.synapse.org/docs/Managing-Your-Account.2055405596.html#ManagingYourAccount-PersonalAccessTokens) and install the Python client. ``` pip install synapseclient export SYNAPSE_AUTH_TOKEN=<Access Token here> ``` Load the dataset using the Hugging Face `datasets` Python API: ```python from datasets import load_dataset dataset = load_dataset('SageBio/testdataset', split='train') ```
SageBio/testdataset
[ "language:en", "license:other", "region:us" ]
2023-05-20T05:46:37+00:00
{"language": ["en"], "license": "other", "extra_gated_prompt": "You agree to not attempt to determine the identity of individuals in this dataset", "extra_gated_fields": {"Company": "text", "Country": "text", "I agree to use this model for non-commercial use ONLY": "checkbox"}}
2023-05-21T06:02:38+00:00
93a052103034fbd946897c29cf5c11d7adb51671
# Dataset Card for "donut-docparsing-sogc-trademarks-1883-2001" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Travad98/donut-docparsing-sogc-trademarks-1883-2001
[ "region:us" ]
2023-05-20T06:24:30+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "sequence": {"sequence": {"sequence": "float32"}}}, {"name": "labels", "sequence": "int64"}, {"name": "target_sequence", "dtype": "string"}, {"name": "period", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39793168388.07692, "num_examples": 2695}, {"name": "test", "num_bytes": 4547790672.923077, "num_examples": 308}], "download_size": 58622689, "dataset_size": 44340959061.0}}
2023-05-20T08:03:49+00:00
5da7d5ef4b77cec16ed51bfafe35b8258675acc6
# Dataset Card for "chunk_97" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_97
[ "region:us" ]
2023-05-20T07:00:49+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1233302768, "num_examples": 242204}], "download_size": 1258110810, "dataset_size": 1233302768}}
2023-05-20T07:01:33+00:00
41d583257a9ab937c57a699d7c1d7e7602ae8803
FidelOdok/DOA_datasetSmall
[ "license:creativeml-openrail-m", "region:us" ]
2023-05-20T07:03:20+00:00
{"license": "creativeml-openrail-m", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "0", "1": "1", "2": "10", "3": "100", "4": "101", "5": "102", "6": "103", "7": "104", "8": "105", "9": "106", "10": "107", "11": "108", "12": "109", "13": "11", "14": "110", "15": "111", "16": "112", "17": "113", "18": "114", "19": "115", "20": "116", "21": "117", "22": "118", "23": "119", "24": "12", "25": "120", "26": "121", "27": "122", "28": "123", "29": "124", "30": "125", "31": "126", "32": "127", "33": "128", "34": "129", "35": "13", "36": "130", "37": "131", "38": "132", "39": "133", "40": "134", "41": "135", "42": "136", "43": "137", "44": "138", "45": "139", "46": "14", "47": "140", "48": "141", "49": "142", "50": "143", "51": "144", "52": "145", "53": "146", "54": "147", "55": "148", "56": "149", "57": "15", "58": "150", "59": "151", "60": "152", "61": "153", "62": "154", "63": "155", "64": "156", "65": "157", "66": "158", "67": "159", "68": "16", "69": "160", "70": "161", "71": "162", "72": "163", "73": "164", "74": "165", "75": "166", "76": "167", "77": "168", "78": "169", "79": "17", "80": "170", "81": "171", "82": "172", "83": "173", "84": "174", "85": "175", "86": "176", "87": "177", "88": "178", "89": "179", "90": "18", "91": "180", "92": "181", "93": "182", "94": "183", "95": "184", "96": "185", "97": "186", "98": "187", "99": "188", "100": "189", "101": "19", "102": "190", "103": "191", "104": "192", "105": "193", "106": "194", "107": "195", "108": "197", "109": "198", "110": "199", "111": "2", "112": "20", "113": "200", "114": "201", "115": "202", "116": "203", "117": "204", "118": "205", "119": "206", "120": "207", "121": "208", "122": "209", "123": "21", "124": "210", "125": "211", "126": "212", "127": "213", "128": "214", "129": "215", "130": "216", "131": "217", "132": "218", "133": "219", "134": "22", "135": "220", "136": "221", "137": "222", "138": "223", "139": "224", "140": "225", "141": "226", "142": "227", "143": "228", "144": "229", "145": "23", "146": "230", "147": "231", "148": "232", "149": "233", "150": "234", "151": "235", "152": "236", "153": "237", "154": "238", "155": "239", "156": "24", "157": "240", "158": "241", "159": "242", "160": "243", "161": "244", "162": "245", "163": "246", "164": "247", "165": "248", "166": "249", "167": "25", "168": "250", "169": "251", "170": "252", "171": "253", "172": "254", "173": "255", "174": "256", "175": "257", "176": "258", "177": "259", "178": "26", "179": "260", "180": "261", "181": "262", "182": "263", "183": "264", "184": "265", "185": "266", "186": "267", "187": "268", "188": "269", "189": "27", "190": "270", "191": "271", "192": "272", "193": "273", "194": "274", "195": "275", "196": "276", "197": "277", "198": "278", "199": "279", "200": "28", "201": "280", "202": "281", "203": "282", "204": "283", "205": "284", "206": "285", "207": "286", "208": "287", "209": "288", "210": "289", "211": "29", "212": "290", "213": "291", "214": "292", "215": "293", "216": "294", "217": "295", "218": "296", "219": "297", "220": "298", "221": "299", "222": "3", "223": "30", "224": "300", "225": "301", "226": "302", "227": "303", "228": "304", "229": "305", "230": "306", "231": "307", "232": "308", "233": "309", "234": "31", "235": "310", "236": "311", "237": "312", "238": "313", "239": "314", "240": "315", "241": "316", "242": "317", "243": "318", "244": "319", "245": "32", "246": "320", "247": "321", "248": "322", "249": "323", "250": "324", "251": "325", "252": 
"326", "253": "327", "254": "328", "255": "329", "256": "33", "257": "330", "258": "331", "259": "332", "260": "333", "261": "334", "262": "335", "263": "336", "264": "337", "265": "338", "266": "339", "267": "34", "268": "340", "269": "341", "270": "342", "271": "343", "272": "344", "273": "345", "274": "346", "275": "347", "276": "348", "277": "349", "278": "35", "279": "350", "280": "351", "281": "352", "282": "353", "283": "354", "284": "355", "285": "356", "286": "357", "287": "358", "288": "359", "289": "36", "290": "360", "291": "361", "292": "362", "293": "363", "294": "364", "295": "365", "296": "366", "297": "367", "298": "368", "299": "369", "300": "37", "301": "370", "302": "371", "303": "372", "304": "373", "305": "374", "306": "375", "307": "376", "308": "377", "309": "378", "310": "379", "311": "38", "312": "380", "313": "381", "314": "382", "315": "383", "316": "384", "317": "385", "318": "386", "319": "387", "320": "388", "321": "389", "322": "39", "323": "390", "324": "391", "325": "392", "326": "393", "327": "394", "328": "395", "329": "396", "330": "397", "331": "398", "332": "399", "333": "4", "334": "40", "335": "400", "336": "401", "337": "402", "338": "403", "339": "404", "340": "405", "341": "406", "342": "407", "343": "408", "344": "409", "345": "41", "346": "410", "347": "411", "348": "412", "349": "413", "350": "414", "351": "415", "352": "416", "353": "417", "354": "418", "355": "419", "356": "42", "357": "420", "358": "421", "359": "422", "360": "423", "361": "424", "362": "425", "363": "426", "364": "427", "365": "428", "366": "43", "367": "44", "368": "45", "369": "46", "370": "47", "371": "48", "372": "49", "373": "5", "374": "50", "375": "51", "376": "52", "377": "53", "378": "54", "379": "55", "380": "56", "381": "57", "382": "58", "383": "59", "384": "6", "385": "60", "386": "61", "387": "62", "388": "63", "389": "64", "390": "65", "391": "66", "392": "67", "393": "68", "394": "69", "395": "7", "396": "70", "397": "71", "398": "72", "399": "73", "400": "74", "401": "75", "402": "76", "403": "77", "404": "78", "405": "79", "406": "8", "407": "80", "408": "81", "409": "82", "410": "83", "411": "84", "412": "85", "413": "86", "414": "87", "415": "88", "416": "89", "417": "9", "418": "90", "419": "91", "420": "92", "421": "93", "422": "94", "423": "95", "424": "96", "425": "97", "426": "98", "427": "99"}}}}], "splits": [{"name": "train", "num_bytes": 9688788846.77601, "num_examples": 25148}], "download_size": 9690107170, "dataset_size": 9688788846.77601}}
2023-05-20T07:14:48+00:00
b731f92f15fb78912170764f520a05b24d5341c2
OpenShape/openshape-objaverse-embeddings
[ "license:mit", "region:us" ]
2023-05-20T07:07:26+00:00
{"license": "mit"}
2023-07-03T02:17:19+00:00
0f6e3115004da82e19c2c9051f51e45fbd1b07b5
# Dataset Card for "chunk_91" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_91
[ "region:us" ]
2023-05-20T07:08:59+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1270331792, "num_examples": 249476}], "download_size": 1296389670, "dataset_size": 1270331792}}
2023-05-20T07:09:39+00:00
5c4b24d821d2e19c74ed58e9247fb8b1fa4ff909
# Dataset Card for "chunk_96" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_96
[ "region:us" ]
2023-05-20T07:09:08+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1248125580, "num_examples": 245115}], "download_size": 1273303637, "dataset_size": 1248125580}}
2023-05-20T07:09:46+00:00
c195fe6ee8ebfd6570f33bb917d3cf2b6449381f
# Dataset Card for "chunk_98" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_98
[ "region:us" ]
2023-05-20T07:10:18+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1247081720, "num_examples": 244910}], "download_size": 1271966974, "dataset_size": 1247081720}}
2023-05-20T07:10:59+00:00
0c050e1352410aa869129f35941ba81013c071fc
# Dataset Card for "chunk_93" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_93
[ "region:us" ]
2023-05-20T07:10:47+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1258044796, "num_examples": 247063}], "download_size": 1284316210, "dataset_size": 1258044796}}
2023-05-20T07:11:38+00:00
284cb78dce431acaa4d4fa759e472759a376e055
# Dataset Card for "chunk_92" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_92
[ "region:us" ]
2023-05-20T07:11:26+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1262316984, "num_examples": 247902}], "download_size": 1287803838, "dataset_size": 1262316984}}
2023-05-20T07:14:29+00:00
1986cb7db6d308bfc027383b10054a989d2bc90d
# Dataset Card for "chunk_94" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_94
[ "region:us" ]
2023-05-20T07:12:17+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1235253004, "num_examples": 242587}], "download_size": 1261067755, "dataset_size": 1235253004}}
2023-05-20T07:13:10+00:00
6a148e60598f0e73715d0b7aac51cd582d79cb28
Thouph/tag_part
[ "license:mit", "region:us" ]
2023-05-20T07:14:48+00:00
{"license": "mit", "viewer": false}
2023-05-29T12:51:49+00:00
537d95908eeb21888b37500f1b44090805c11c75
# Dataset Card for "chunk_90" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_90
[ "region:us" ]
2023-05-20T07:24:01+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1296275532, "num_examples": 254571}], "download_size": 1322328801, "dataset_size": 1296275532}}
2023-05-20T07:24:49+00:00
ec96ddc1420fe80eefb68649e72ae0932f81833f
# Dataset Card for "IAM_Sentences_LLaVA_json" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alpayariyak/IAM_Sentences_LLaVA_json
[ "region:us" ]
2023-05-20T07:26:13+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1259235, "num_examples": 5663}], "download_size": 418365, "dataset_size": 1259235}}
2023-05-20T07:45:35+00:00
a207a200b198bfeb187f3a70aed159bc3219086b
# Dataset Card for "chunk_95" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_95
[ "region:us" ]
2023-05-20T07:31:05+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1249113428, "num_examples": 245309}], "download_size": 1273890922, "dataset_size": 1249113428}}
2023-05-20T07:33:24+00:00
a45d9e5ba5fafdc8cdb48f9c498e81aec395b5e4
# Dataset Card for "chunk_99" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_99
[ "region:us" ]
2023-05-20T07:48:16+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1285760552, "num_examples": 252506}], "download_size": 1313552275, "dataset_size": 1285760552}}
2023-05-20T07:50:38+00:00
8cdc93ff2ab9669503912b21d12efaca8ff6aa07
basesssssp/bad-kemono-negative-embedding
[ "license:other", "region:us" ]
2023-05-20T08:23:40+00:00
{"license": "other"}
2023-05-24T13:59:33+00:00
c27e08d8b10740684cd2630f1d38f22ea31053cf
# Dataset Card for Synapse Test Dataset ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Point of Contact:** ### Dataset Summary This is a test dataset to highlight the functionality of a dataset script with a custom loading function. This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
katielink/synapse_test_dataset
[ "license:other", "synapse", "biology", "region:us" ]
2023-05-20T08:36:39+00:00
{"license": "other", "tags": ["synapse", "biology"]}
2023-05-20T08:57:31+00:00
643320b5e1997f8df7c75faf0c80ff6f1974d389
# Dataset Card for semantics-ws-qna-oa with ~2K entries. ### Dataset Summary License: Apache-2.0. Contains a parquet file with INSTRUCTION, RESPONSE, SOURCE and METADATA columns. ### Original Datasets are available here: - https://leviants.com/multilingual-simlex999-and-wordsim353/ ### Paper of the original Dataset: - https://arxiv.org/pdf/1508.00106v5.pdf
0x22almostEvil/semantics-ws-qna-oa
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "language:ru", "language:de", "language:it", "license:apache-2.0", "semantics", "arxiv:1508.00106", "region:us" ]
2023-05-20T08:51:10+00:00
{"language": ["en", "ru", "de", "it"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["semantics"]}
2023-05-21T06:08:16+00:00
1247a73ae2214d99b58f8a45a35f3ae11894766a
# Dataset Card for "chunk_106" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_106
[ "region:us" ]
2023-05-20T09:06:16+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 887902224, "num_examples": 174372}], "download_size": 904438425, "dataset_size": 887902224}}
2023-05-20T09:06:55+00:00
70515deafe99b8078e97e08f9d879ea0d5437aaf
svencard/sd
[ "license:openrail", "region:us" ]
2023-05-20T09:22:57+00:00
{"license": "openrail"}
2023-05-20T09:22:57+00:00
f23c247253a1c1e018c3ec0c97be74886807d434
# Dataset Card for "chunk_107" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_107
[ "region:us" ]
2023-05-20T09:24:59+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 888248480, "num_examples": 174440}], "download_size": 904980472, "dataset_size": 888248480}}
2023-05-20T09:26:36+00:00
cb91dc49471b15bcb65c9607863ef2a9f732b05a
# Dataset Card for "chunk_105" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_105
[ "region:us" ]
2023-05-20T09:35:46+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 970637040, "num_examples": 190620}], "download_size": 990788274, "dataset_size": 970637040}}
2023-05-20T09:37:30+00:00
29f524dc58948999dfecef96d87829af73ebe9a4
# Dataset Card for "chunk_109" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_109
[ "region:us" ]
2023-05-20T09:39:28+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1275337228, "num_examples": 250459}], "download_size": 1300854834, "dataset_size": 1275337228}}
2023-05-20T09:40:06+00:00
8c77a1fbd24ab13a0d6afe7cdafd080e1db95a79
# Dataset Card for "chunk_101" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_101
[ "region:us" ]
2023-05-20T09:43:24+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1346523388, "num_examples": 264439}], "download_size": 1375879377, "dataset_size": 1346523388}}
2023-05-20T09:44:00+00:00
8c14bc5ef3ddf154fae00ed749ed6ef58a85d080
# Dataset Card for "chunk_100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_100
[ "region:us" ]
2023-05-20T09:43:42+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1289981820, "num_examples": 253335}], "download_size": 1317632489, "dataset_size": 1289981820}}
2023-05-20T09:44:23+00:00
b592c1f7cf13a520d2122429a6ca97a21b2ea738
# Dataset Card for "chunk_103" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_103
[ "region:us" ]
2023-05-20T09:48:36+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1389107784, "num_examples": 272802}], "download_size": 1394978083, "dataset_size": 1389107784}}
2023-05-20T09:49:22+00:00
467deb95581766cb8a8bf09d1dd5e2e064b80d6e
# Dataset Card for "chunk_108" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_108
[ "region:us" ]
2023-05-20T09:55:57+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1227172000, "num_examples": 241000}], "download_size": 1252378289, "dataset_size": 1227172000}}
2023-05-20T09:58:08+00:00
0e7bb0f2e36bba26c299046ea2fa17a3167d3b83
# Dataset Card for "quran_36-55" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MojtabaDelavar97/quran_36-55
[ "region:us" ]
2023-05-20T10:01:48+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "Train", "num_bytes": 351337990.936, "num_examples": 1793}, {"name": "Test", "num_bytes": 23362508.0, "num_examples": 163}], "download_size": 322555956, "dataset_size": 374700498.936}}
2023-05-20T10:02:22+00:00
206620d419c08b5d20001f208c727d24ede3d5ff
# Dataset Card for "chunk_102" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_102
[ "region:us" ]
2023-05-20T10:19:02+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1307824188, "num_examples": 256839}], "download_size": 1336281185, "dataset_size": 1307824188}}
2023-05-20T10:21:24+00:00
28819ad5ce5b9b09afe8bb78b7fa2e2a700cf839
# Dataset Card for "chunk_104" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_104
[ "region:us" ]
2023-05-20T10:24:24+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1467127408, "num_examples": 288124}], "download_size": 1493058195, "dataset_size": 1467127408}}
2023-05-20T10:27:01+00:00
5f27e735e7933c768753f3fa7440aae201958d35
# Dataset Card for luotuo-QA-B ## Dataset Description - **Homepage:** https://github.com/LC1332/Luotuo-Chinese-LLM - **Repository:** https://github.com/LC1332/Luotuo-QA - **Point of Contact:** [email protected] ### Dataset Summary Anki_Card是一种用于记忆和学习的电子卡片系统。我们建立了一个类似于这种形式的问答数据集,旨在推动中英文语境下问答模型的研究和发展。 我们的数据集是在3个开源数据集之上生成构建的,这3个数据集分别是: ·Chinese Scientific Literature Dataset ·CNN-DailyMail News Text Summarization ·arXiv Dataset 您可以直接搜索这些原始数据集的名称或是从以下链接访问它们 ·https://github.com/ydli-ai/CSL ·https://www.kaggle.com/datasets/gowrishankarp/newspaper-text-summarization-cnn-dailymail ·https://www.kaggle.com/datasets/Cornell-University/arxiv 我们在这些数据集的基础上针对每一个摘要或新闻生成了5个“问题-答案”对。数据分布如下: ---从Chinese Scientific Literature Dataset(CSL)数据集中生成了25836条中文数据,共129180个问答对。 ---从CNN-DailyMail News Text Summarization数据集中生成了2026条数据,共10130个问答对。 ---从arXiv Dataset数据集中生成了3602条英文数据,共18010个问答对。 此外,由于此数据集是我们Luotuo-QA项目的一部分,我们将它叫做luotuo-QA-B。 您可以在这里查看Luotuo-QA项目:https://github.com/LC1332/Luotuo-QA 此数据集适用于训练和评估中文对话式问答模型。有益于推动中文自然语言处理领域的发展,同时也为研究人员和开发者提供了一个基准,用于比较不同模型的性能和探索新的方法。 我们希望这一工作能够促进全球范围内中文语境对话式问答任务的研究和进一步的创新。 ----------------------------------------------------------------------------------------------------------------------------------------------- Anki_Card is an electronic flashcard system used for memory and learning. We have created a question-and-answer dataset in a similar format to facilitate research and development of question-answering models in both Chinese and English contexts. Our dataset is constructed based on three open-source datasets: ·Chinese Scientific Literature Dataset ·CNN-DailyMail News Text Summarization ·arXiv Dataset You can directly search for the names of these original datasets or access them from the following links: ·Chinese Scientific Literature Dataset (CSL): https://github.com/ydli-ai/CSL ·CNN-DailyMail News Text Summarization: https://www.kaggle.com/datasets/gowrishankarp/newspaper-text-summarization-cnn-dailymail ·arXiv Dataset: https://www.kaggle.com/datasets/Cornell-University/arxiv Based on these datasets, we have generated five "question-answer" pairs for each summary or news article. The data distribution is as follows: ---From the Chinese Scientific Literature Dataset (CSL), we generated 25,836 Chinese data points, resulting in a total of 129,180 question-answer pairs. ---From the CNN-DailyMail News Text Summarization dataset, we generated 2,026 data points, resulting in a total of 10,130 question-answer pairs. ---From the arXiv Dataset, we generated 3,602 English data points, resulting in a total of 18,010 question-answer pairs. Furthermore, as this dataset is part of our Luotuo-QA project, we refer to it as luotuo-QA-B. You can find the Luotuo-QA project here: https://github.com/LC1332/Luotuo-QA This dataset is suitable for training and evaluating Chinese conversational question-answering models. It contributes to the development of Chinese natural language processing and provides researchers and developers with a benchmark for comparing the performance of different models and exploring new approaches. We hope that this work will promote research and further innovation in Chinese conversational question-answering tasks on a global scale. 
### Languages CHINESE, ENGLISH ### Data Instances 中文数据样例: ``` { "story": "针对已有神经网络功放建模的建模精度不高,易陷入局部极值等问题,提出一种新的改进并行粒子群算法(Improved Parallel Particle Swarm Optimization,IPPSO)。该算法在并行粒子群算法的基础上引入自适应变异操作,防止陷入局部最优;在微粒的速度项中加入整体微粒群的全局最优位置,动态调节学习因子与线性递减惯性权重,加快微粒收敛。将该改进算法用于优化RBF神经网络参数,并用优化的网络对非线性功放进行建模仿真。结果表明,该算法能有效减小建模误差,且均方根误差提高19.08%,进一步提高了神经网络功放建模精度。", "QA": [ { "question": "什么是IPPSO算法?", "answer": "IPPSO是一种改进的并行粒子群算法,引入自适应变异操作,防止陷入局部最优;在微粒的速度项中加入整体微粒群的全局最优位置,动态调节学习因子与线性递减惯性权重,加快微粒收敛。" }, { "question": "IPPSO算法用于什么?", "answer": "IPPSO算法用于优化RBF神经网络参数,并用优化的网络对非线性功放进行建模仿真。" }, { "question": "IPPSO算法的优点是什么?", "answer": "IPPSO算法能有效减小建模误差,且均方根误差提高19.08%,进一步提高了神经网络功放建模精度。" }, { "question": "为什么需要改进并行粒子群算法?", "answer": "因为已有的神经网络功放建模的建模精度不高,易陷入局部极值等问题。" }, { "question": "IPPSO算法如何防止陷入局部最优?", "answer": "IPPSO算法引入自适应变异操作,防止陷入局部最优;在微粒的速度项中加入整体微粒群的全局最优位置,动态调节学习因子与线性递减惯性权重,加快微粒收敛。" } ] } ``` 英文数据样例: ``` { "story": "We discuss an alternative non-perturbative proof of Bertrand's theorem that leads in a concise way directly to the two allowed fields: the newtonian and the isotropic harmonic oscillator central fields.", "QA": [ { "question": "What is Bertrand's theorem?", "answer": "It is a theorem that leads to the two allowed fields: the newtonian and the isotropic harmonic oscillator central fields." }, { "question": "What is the proof of Bertrand's theorem like?", "answer": "It is an alternative non-perturbative proof." }, { "question": "How many central fields are allowed by Bertrand's theorem?", "answer": "Two central fields are allowed: the newtonian and the isotropic harmonic oscillator." }, { "question": "What is the advantage of the proof discussed in the text?", "answer": "It leads directly to the allowed fields in a concise way." }, { "question": "What are the two allowed fields according to Bertrand's theorem?", "answer": "The newtonian and the isotropic harmonic oscillator central fields." } ] } ``` ### Licensing Information 我们的协议与三个原始数据集的协议保持一致,请阅读以下内容。 ·CSL数据集的协议是Apache License 2.0,除非遵守许可证,否则您不得使用此文件 ·CNN-DailyMail News Text Summarization数据集的协议是 CC0: Public Domain ·arXiv数据集的协议是 CC0: Public Domain ----------------------------------------------------------------------------------------------------------------------------------------------- Our agreements are consistent with the agreements of three original datasets. Please read the following information. · The protocol for the CSL dataset is Apache License 2.0. You are not allowed to use this file unless you comply with the license. · The protocol for the CNN-DailyMail News Text Summarization dataset is CC0: Public Domain. · The protocol for the arXiv dataset is CC0: Public Domain. ### Citation Information 如果您在项目中使用了我们的模型、代码或者数据,请引用我们。 Please cite us if you use the data or code in this repo. ```bibtex @misc{alpaca, author={Jianshen Liao, Ao Sun, Qinyu Luo, Hongsen Huang, Cheng Li}, title = {Luotuo-QA: Better Conversational Question Answering Model with Answer Completion}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/LC1332/Luotuo-QA}}, } ```
Logic123456789/luotuoQA-B
[ "task_categories:question-answering", "language:zh", "language:en", "license:other", "region:us" ]
2023-05-20T10:26:01+00:00
{"language": ["zh", "en"], "license": "other", "task_categories": ["question-answering"], "extra_gated_prompt": "\u6211\u4eec\u5236\u4f5c\u4e86luotuo-QA-B\u6570\u636e\u96c6\uff0c\u8bf7\u4ed4\u7ec6\u9605\u8bfbLicensing Information\u90e8\u5206\u7684\u4fe1\u606f\u3002", "extra_gated_heading": "\u60a8\u9700\u8981\u63a5\u53d7\u534f\u8bae\u5e76\u63d0\u4ea4\u4fe1\u606f\u4ee5\u83b7\u53d6\u6b64\u6570\u636e\u96c6", "extra_gated_fields": {"\u59d3\u540d": "text", "\u90ae\u7bb1": "text", "\u6240\u5728\u7ec4\u7ec7": "text", "\u4f7f\u7528\u76ee\u7684": "text", "\u6211\u540c\u610f\u4ec5\u5c06\u6b64\u6570\u636e\u96c6\u7528\u4e8e\u975e\u5546\u4e1a\u7528\u9014": "checkbox"}, "extra_gated_button_content": "\u6211\u5df2\u9605\u8bfb\u534f\u8bae\u5e76\u540c\u610f\u63d0\u4f9b\u76f8\u5173\u4fe1\u606f"}
2023-05-22T06:29:30+00:00
f2bb1a4568acf58a5e9c29f6f48976b9ddd2481c
# Clima500 Dataset GitHub Repo : https://github.com/mbzuai-oryx/ClimateGPT Please cite our work : ``` @inproceedings{mullappilly2023arabic, title={Arabic Mini-ClimateGPT: A Climate Change and Sustainability Tailored Arabic LLM}, author={Mullappilly, Sahal and Shaker, Abdelrahman and Thawakar, Omkar and Cholakkal, Hisham and Anwer, Rao and Khan, Salman and Khan, Fahad}, booktitle={Findings of the Association for Computational Linguistics: EMNLP 2023}, pages={14126--14136}, year={2023} } ```
mbzuai-oryx/Clima500
[ "license:cc-by-nc-sa-4.0", "region:us" ]
2023-05-20T11:05:12+00:00
{"license": "cc-by-nc-sa-4.0"}
2024-02-16T08:27:53+00:00
b6670d2699663783081979d99cd8ca07ea3c753b
# Dataset Card for "chunk_111" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_111
[ "region:us" ]
2023-05-20T11:18:39+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1173232444, "num_examples": 230407}], "download_size": 1180373791, "dataset_size": 1173232444}}
2023-05-20T11:20:46+00:00
5fa513a5085a44e16dd096ccc8e8d8de175aba3a
# Dataset Card for "chunk_112" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_112
[ "region:us" ]
2023-05-20T11:28:35+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1315910284, "num_examples": 258427}], "download_size": 1341764002, "dataset_size": 1315910284}}
2023-05-20T11:29:15+00:00
3116a829e71afd628394f059a1c3d3203029a5a5
# Dataset Card for "instruct_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shoubhik8/instruct_data
[ "region:us" ]
2023-05-20T11:36:02+00:00
{"dataset_info": {"features": [{"name": "instructions", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 316774393, "num_examples": 320339}], "download_size": 11233992, "dataset_size": 316774393}}
2023-05-20T11:36:07+00:00
55183841a6a83bc392ecebbbb2b2e8f0bce48953
# Dataset Card for "chunk_110" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_110
[ "region:us" ]
2023-05-20T11:38:22+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1211799252, "num_examples": 237981}], "download_size": 1228916584, "dataset_size": 1211799252}}
2023-05-20T11:40:35+00:00
f985549623b27df0db9267cf364cf23d19bcca6d
# Dataset Card for "chunk_114" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_114
[ "region:us" ]
2023-05-20T11:41:53+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1443627828, "num_examples": 283509}], "download_size": 1471160071, "dataset_size": 1443627828}}
2023-05-20T11:42:41+00:00
722f310099f5d96c70a14441236e6e35156aae23
# Dataset Card for "chunk_113" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_113
[ "region:us" ]
2023-05-20T11:42:26+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1532172616, "num_examples": 300898}], "download_size": 1561809638, "dataset_size": 1532172616}}
2023-05-20T11:43:11+00:00
735cec6dc5acc0f13844da545284f1384420f380
# Dataset Card for "chunk_116" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_116
[ "region:us" ]
2023-05-20T11:47:51+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1390421520, "num_examples": 273060}], "download_size": 1417200673, "dataset_size": 1390421520}}
2023-05-20T11:49:03+00:00
c284009366207e51e44e5b7e86989fa1e29b6bda
# Dataset Card for "squad_as2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lucadiliello/squad_as2
[ "region:us" ]
2023-05-20T11:55:47+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 98242758, "num_examples": 441978}, {"name": "dev", "num_bytes": 6088351, "num_examples": 26677}, {"name": "test", "num_bytes": 6161786, "num_examples": 26925}], "download_size": 16183526, "dataset_size": 110492895}}
2023-05-20T11:55:58+00:00
294ff155a2f6486ee6038cb49ec59f5ed3c0f257
# Dataset Card for "chunk_117" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_117
[ "region:us" ]
2023-05-20T12:07:12+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1503153308, "num_examples": 295199}], "download_size": 1532705611, "dataset_size": 1503153308}}
2023-05-20T12:07:56+00:00
a36a71f560eca2651944fb56109edcf42ed4c8a5
# moss-003-sft-data

## Conversation Without Plugins

### Categories

| Category             | \# samples |
|----------------------|-----------:|
| Brainstorming        |     99,162 |
| Complex Instruction  |     95,574 |
| Code                 |    198,079 |
| Role Playing         |    246,375 |
| Writing              |    341,087 |
| Harmless             |     74,573 |
| Others               |     19,701 |
| Total                |  1,074,551 |

**Others** contains two categories: **Continue** (9,839) and **Switching** (9,862). The **Continue** category refers to instances in a conversation where the user asks the system to continue the response from the previous round that was left unfinished. The **Switching** category refers to instances in a conversation where the user switches the language they are using. We removed the data for the Honesty category because it contains private information.
fnlp/moss-003-sft-data
[ "license:cc-by-4.0", "region:us" ]
2023-05-20T12:07:50+00:00
{"license": "cc-by-4.0"}
2023-07-09T14:09:50+00:00
2bff9a4431f58f414ce8469356a49f4a23eb0a71
# Dataset Card for "instruct_data_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shoubhik8/instruct_data_train
[ "region:us" ]
2023-05-20T12:17:04+00:00
{"dataset_info": {"features": [{"name": "instructions", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 219317265, "num_examples": 220000}], "download_size": 9631975, "dataset_size": 219317265}}
2023-05-20T12:17:10+00:00
1f3c6b9fd28936e8284fd5638783ac9bbbc0dd87
# Dataset Card for "instruct_data_valid" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shoubhik8/instruct_data_valid
[ "region:us" ]
2023-05-20T12:17:10+00:00
{"dataset_info": {"features": [{"name": "instructions", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 100019840, "num_examples": 100339}], "download_size": 4396637, "dataset_size": 100019840}}
2023-05-20T12:17:15+00:00
477e31aa0d8924f62e24102505f5eb6d4a483bb3
LoliOverflow/DUMP
[ "license:other", "region:us" ]
2023-05-20T12:26:48+00:00
{"license": "other"}
2023-06-12T00:54:20+00:00
ea3f82945639f59a978313218309114b461c2b96
This dataset was taken from the creators' [GitHub repository](https://github.com/salute-developers/golos/tree/master/dusha) and converted for my own study needs.

# Dusha dataset

Dusha is a bi-modal corpus suitable for speech emotion recognition (SER) tasks. The dataset consists of about 300,000 audio recordings of Russian speech, their transcripts, and emotion labels. The corpus contains approximately 350 hours of data. Four basic emotions that usually appear in a dialog with a virtual assistant were selected: Happiness (Positive), Sadness, Anger, and Neutral.

## **License**

[English Version](https://github.com/salute-developers/golos/blob/master/license/en_us.pdf)

[Russian Version](https://github.com/salute-developers/golos/blob/master/license/ru.pdf)

## **Authors**

- Artem Sokolov
- Fedor Minkin
- Nikita Savushkin
- Nikolay Karpov
- Oleg Kutuzov
- Vladimir Kondratenko
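A minimal loading sketch is shown below. The split name and the "audio" column name are assumptions — inspect `ds.features` for the actual schema before relying on specific fields.

```python
from datasets import load_dataset, Audio

# Split and column names here are assumptions; print ds.features to see the real schema.
ds = load_dataset("KELONMYOSA/dusha_emotion_audio", split="train")
print(ds.features)

# Resample to 16 kHz, the rate most pretrained speech models expect.
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
print(ds[0]["audio"]["array"].shape)
```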
KELONMYOSA/dusha_emotion_audio
[ "task_categories:audio-classification", "size_categories:100K<n<1M", "language:ru", "region:us" ]
2023-05-20T12:31:10+00:00
{"language": ["ru"], "size_categories": ["100K<n<1M"], "task_categories": ["audio-classification"], "pretty_name": "Russian speech emotions"}
2023-05-28T09:15:05+00:00
d6a8e1ef01247323c9dd699f36af42cd718dfb18
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
stilletto/earth_canny
[ "region:us" ]
2023-05-20T13:05:20+00:00
{}
2023-05-20T13:20:05+00:00
c8904285b707bdccf9ee9097535b9ab9074b346e
This repository contains a total of 483 tabular datasets with meaningful column names collected from OpenML, UCI, and Kaggle platforms. The last column of each dataset is the label column. For more details, please refer to our paper https://arxiv.org/abs/2305.09696. You can use the [code](https://github.com/ZhangTP1996/TapTap/blob/master/load_pretraining_datasets.py) to load all the datasets into a dictionary of pd.DataFrame. An example script can be found below: ```python from datasets import load_dataset import pandas as pd import numpy as np data = {} dataset = load_dataset(path='ztphs980/taptap_datasets') dataset = dataset['train'].to_dict() for table_name, table in zip(dataset['dataset_name'], dataset['table']): table = pd.DataFrame.from_dict(eval(table, {'nan': np.nan})) data[table_name] = table ```
ztphs980/taptap_datasets
[ "language:en", "license:mit", "arxiv:2305.09696", "region:us" ]
2023-05-20T13:34:39+00:00
{"language": ["en"], "license": "mit"}
2023-05-23T11:32:37+00:00
eef6c8aa5ff62627032e20accb35b7f43a0fc144
# Dataset Card for "chunk_126" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_126
[ "region:us" ]
2023-05-20T14:15:53+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1249627720, "num_examples": 245410}], "download_size": 1274836307, "dataset_size": 1249627720}}
2023-05-20T14:16:59+00:00
7060bfccee3a7930e57ac81ef2ff7174ea1d5a14
# Dataset Card for "chunk_125" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_125
[ "region:us" ]
2023-05-20T14:24:10+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1300761584, "num_examples": 255452}], "download_size": 1326461313, "dataset_size": 1300761584}}
2023-05-20T14:24:50+00:00
14612aeb0dcea4a70c4a739f330a291897df4c16
# Dataset Card for "chunk_118" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_118
[ "region:us" ]
2023-05-20T14:25:33+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1428346736, "num_examples": 280508}], "download_size": 1458298434, "dataset_size": 1428346736}}
2023-05-20T14:28:22+00:00
61f02ca7e1f8027feed1a0c98d39d807b4cb45f7
# toxic-detection-testset

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Citation Information](#citation-information)

## Dataset Description

### Dataset Summary

This dataset is a test set for toxic detection that contains both clean data and its perturbed version with human-written perturbations collected online. In addition, our dataset can be used to benchmark misspelling correctors as well.

### Languages

English

## Dataset Structure

### Data Instances

```
{
  "clean_version": "this is pretty much exactly how i feel damn",
  "perturbed_version": "this is pretty much exactly how i feel daaammnn",
  "toxicity": 0.7,
  "obscene": 0.7,
  "sexual_explicit": 0,
  "identity_attack": 0,
  ...
  "insult": 0.2,
  "quality_mean": 4
}
```

### Data Fields

This dataset is derived from the [Jigsaw data](https://www.kaggle.com/competitions/jigsaw-unintended-bias-in-toxicity-classification/data). Hence, it keeps all the useful metrics and attributes.

**Main**
* clean_version
* perturbed_version

**Metrics**
* toxicity
* severe_toxicity
* obscene
* threat
* insult
* identity_attack
* sexual_explicit

**Identity attributes**
* male
* female
* transgender
* other_gender
* heterosexual
* homosexual_gay_or_lesbian
* bisexual
* other_sexual_orientation
* christian
* jewish
* muslim
* hindu
* buddhist
* atheist
* other_religion
* black
* white
* asian
* latino
* other_race_or_ethnicity
* physical_disability
* intellectual_or_learning_disability
* psychiatric_or_mental_illness
* other_disability

### Data Splits

test: 1339

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

Jigsaw is a widely used toxic speech classification dataset containing approximately 2 million public comments from the Civil Comments platform. In addition to the toxicity score labels, the Jigsaw dataset also provides several toxicity sub-type dimensions which indicate particular comments' target groups, such as male, female, black, and Asian. Due to these prolific identity annotations and significant data volume, we adopt this dataset as our raw data source. Since the dataset has been used as the standard benchmark dataset for content moderation tasks, this adoption will also help reduce the entry barrier in adopting NoisyHate from the community.

Since the comments from the Jigsaw dataset contain a lot of special characters, emojis, and informal language, data cleaning was necessary to ensure data quality. Following a typical text processing pipeline, we removed duplicated texts, special characters, special punctuation, hyperlinks, and numbers. Since we only focused on English texts, sentences containing non-standard English words were filtered out. 131,982 comments remained after this cleaning step.

#### Who are the source language producers?

The source data is provided by the Conversation AI team, a research initiative founded by Jigsaw and Google.
### Annotations

#### Annotation process

In the annotation process, we display a guideline that explains the definition of human-generated perturbation and provides examples of both high-quality and low-quality perturbations. Such a training phase has been suggested to elicit high-quality responses from human workers, especially for labeling tasks. Each MTurk worker is then presented with a pair consisting of a perturbed sentence and its clean version and is asked to rate the quality of the perturbed one (the guideline and UI can be found in our [paper](#citation-information)). We recruited five different workers from the North America region through five assignments to assess each pair. A five-second countdown timer was also set for each task to ensure workers spent enough time on it. To ensure the quality of their responses, we designed an attention question that asks them to click on the perturbed word in the given sentence before they provide their quality ratings. Workers who cannot correctly identify the perturbation's location in the given sentence are blocked from future batches. We aimed to pay the workers at an average rate of \$10 per hour, which is well above the federal minimum wage (\$7.25 per hour). The payment for each task was estimated from the average length of the sentences, around 25 words per pair, and the average reading speed of native speakers, around 228 words per minute.

#### Who are the annotators?

US Amazon MTurk workers with a HIT approval rate greater than 98% and more than 1,000 approved HITs.

### Personal and Sensitive Information

N/A

## Additional Information

### Dataset Curators

[More Information Needed]

### Citation Information

Paper is coming soon.
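As a quick illustration of the corrector-benchmarking use case described above, the sketch below assumes the test split has been exported to a local CSV with the field names listed in this card; the file name and the placeholder corrector are hypothetical.

```python
import pandas as pd

# Hypothetical local export of the 1,339-pair test split; column names follow the card.
df = pd.read_csv("noisyhate_test.csv")

def corrector(text: str) -> str:
    """Placeholder misspelling corrector -- swap in a real model here."""
    return text

# Fraction of perturbed inputs that the corrector maps back to the clean reference.
recovered = (df["perturbed_version"].map(corrector) == df["clean_version"]).mean()
print(f"Exact-recovery rate: {recovered:.2%}")
```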
NoisyHate/Noisy_Hate_Data
[ "region:us" ]
2023-05-20T14:30:53+00:00
{}
2023-05-20T14:37:38+00:00
0fd7166f1344baf5fe20826cabf920ec5866dcd5
# Dataset Card for "chunk_115" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_115
[ "region:us" ]
2023-05-20T14:33:40+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1459738916, "num_examples": 286673}], "download_size": 1477815325, "dataset_size": 1459738916}}
2023-05-20T14:34:41+00:00
3e4e750b1783371c87e3145d5fbba2a4193e0631
OAI reverse proxy log data found on the Internet, collected up to 2023-06-17.<br>
The dataset was built to fit the Vicuna format, but some modifications are required if you actually want to train on it.<br>
There are three types: GPT-3.5, GPT-4, and Claude.<br>
<br>
This dataset contains large amounts of AI chat data (from TavernAI, RisuAI, etc.).<br>
The dataset has not been deduplicated.
squarelike/ReverseProxy-OAI-Log
[ "language:en", "license:apache-2.0", "region:us" ]
2023-05-20T14:34:14+00:00
{"language": ["en"], "license": "apache-2.0"}
2023-06-16T17:32:28+00:00
9fcc5348eb53d31b09bd2ae47cf6a2a652811ceb
# Dataset Card for "chunk_120" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_120
[ "region:us" ]
2023-05-20T14:36:00+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1429039248, "num_examples": 280644}], "download_size": 1459712829, "dataset_size": 1429039248}}
2023-05-20T14:37:15+00:00
a5fe04b419e61c8d2685f36f4cc2b3a6cce96eca
# Dataset Card for "chunk_122" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_122
[ "region:us" ]
2023-05-20T14:36:35+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1528195764, "num_examples": 300117}], "download_size": 1559980145, "dataset_size": 1528195764}}
2023-05-20T14:37:24+00:00
94e4f728c81cf7b0f66f53a9846cb38e506e5daa
# Dataset Card for "6a171c66" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/6a171c66
[ "region:us" ]
2023-05-20T14:44:17+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 188, "num_examples": 10}], "download_size": 1335, "dataset_size": 188}}
2023-05-20T14:44:18+00:00
ac0d9274c48e99c202d330bd8921159dd86d9d4d
# Dataset Card for "chunk_123" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_123
[ "region:us" ]
2023-05-20T14:50:25+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1564679944, "num_examples": 307282}], "download_size": 1596374895, "dataset_size": 1564679944}}
2023-05-20T14:51:48+00:00
cb2eb45cd1552852cbc4ef4a82481759c4783674
AlketaR/embedded_faqs_medicarealk
[ "license:openrail", "region:us" ]
2023-05-20T14:51:34+00:00
{"license": "openrail"}
2023-05-20T14:51:34+00:00
b5fad049c47637d3c27cf4044c1b9624fc8f6118
# Dataset Card for "chunk_119" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_119
[ "region:us" ]
2023-05-20T15:01:26+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1367975984, "num_examples": 268652}], "download_size": 1396440566, "dataset_size": 1367975984}}
2023-05-20T15:03:54+00:00
2279eaf9f2580aef77ed6fa0efd7846c381ab5a0
StrategyQA: A Question Answering Benchmark with Implicit Reasoning Strategies

The StrategyQA dataset was created through a crowdsourcing pipeline for eliciting creative and diverse yes/no questions that require implicit reasoning steps. To solve questions in StrategyQA, the reasoning steps should be inferred using a strategy. To guide and evaluate the question answering process, each example in StrategyQA was annotated with a decomposition into reasoning steps for answering it, and Wikipedia paragraphs that provide evidence for the answer to each step.

As illustrated in the paper's overview figure, questions in StrategyQA (Q1) require implicit reasoning, in contrast to multi-step questions that explicitly specify the reasoning process (Q2). Each training example contains a question (Q1), a yes/no answer (A), a decomposition (D), and evidence paragraphs (E).

Data files:

[strategyqa_test](https://huggingface.co/datasets/voidful/StrategyQA/resolve/main/strategyqa_test.json)

[strategyqa_train](https://huggingface.co/datasets/voidful/StrategyQA/blob/main/strategyqa_train.json)

[strategyqa_train_filtered](https://huggingface.co/datasets/voidful/StrategyQA/blob/main/strategyqa_train_filtered.json)

[strategyqa_train_paragraphs](https://huggingface.co/datasets/voidful/StrategyQA/blob/main/strategyqa_train_paragraphs.json)

Paper Title: Did Aristotle Use a Laptop? A Question Answering Benchmark with Implicit Reasoning Strategies

Authors: Mor Geva, Daniel Khashabi, Elad Segal, Tushar Khot, Dan Roth, Jonathan Berant

Transactions of the Association for Computational Linguistics (TACL), 2021

Citation:
```
@article{geva2021strategyqa,
    title = {{Did Aristotle Use a Laptop? A Question Answering Benchmark with Implicit Reasoning Strategies}},
    author = {Geva, Mor and Khashabi, Daniel and Segal, Elad and Khot, Tushar and Roth, Dan and Berant, Jonathan},
    journal = {Transactions of the Association for Computational Linguistics (TACL)},
    year = {2021},
}
```
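A minimal loading sketch using the links above. The `resolve/main` URL form serves the raw file, and the field names (`question`, `answer`, `decomposition`) follow the card's description; treat both the top-level JSON layout and the exact field names as assumptions.

```python
import json
from urllib.request import urlopen

# Assumes the train file is a JSON list of examples with the fields described in the card.
url = "https://huggingface.co/datasets/voidful/StrategyQA/resolve/main/strategyqa_train.json"
train = json.loads(urlopen(url).read().decode("utf-8"))

example = train[0]
print(example["question"], "->", example["answer"])
for step in example.get("decomposition", []):
    print("  step:", step)
```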
voidful/StrategyQA
[ "region:us" ]
2023-05-20T15:02:29+00:00
{}
2023-05-20T15:06:43+00:00
516e22639fded993e8e7af957943c84a315a2d84
# Dataset Card for "chunk_124" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_124
[ "region:us" ]
2023-05-20T15:10:11+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 1518765380, "num_examples": 298265}], "download_size": 1549975356, "dataset_size": 1518765380}}
2023-05-20T15:13:04+00:00
e0c1dd23ce0d62f55392184515ac55975625b37b
Training and test data for the task of Recognizing Semantic Differences (RSD). [See the paper](https://arxiv.org/abs/2305.13303) for details on how the dataset was created, and see our code at https://github.com/ZurichNLP/recognizing-semantic-differences for an example of how to use the data for evaluation. The data are derived from the [SemEval-2016 Task 2 for Interpretable Semantic Textual Similarity](https://alt.qcri.org/semeval2016/task2/) organized by [Agirre et al. (2016)](http://dx.doi.org/10.18653/v1/S16-1082). The original URLs of the data are: * Train: http://alt.qcri.org/semeval2016/task2/data/uploads/train_2015_10_22.utf-8.tar.gz * Test: http://alt.qcri.org/semeval2016/task2/data/uploads/test_goldstandard.tar.gz The translations into non-English languages have been created using machine translation (DeepL). ## Citation ```bibtex @inproceedings{vamvas-sennrich-2023-rsd, title={Towards Unsupervised Recognition of Token-level Semantic Differences in Related Documents}, author={Jannis Vamvas and Rico Sennrich}, month = dec, year = "2023", booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing", address = "Singapore", publisher = "Association for Computational Linguistics", } ```
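A minimal loading sketch; the split name and the token/label column names follow the schema declared in this repo's metadata, but inspect `ds.features` if the layout changes.

```python
from datasets import load_dataset

# Split and column names follow the declared schema (e.g. "test_en", tokens_a / labels_a).
ds = load_dataset("ZurichNLP/rsd-ists-2016", split="test_en")

example = ds[0]
# Each token in sentence A is paired with a per-token semantic-difference score.
for token, score in zip(example["tokens_a"], example["labels_a"]):
    print(f"{token}\t{score}")
```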
ZurichNLP/rsd-ists-2016
[ "task_categories:token-classification", "language_creators:machine-generated", "size_categories:1K<n<10K", "language:en", "language:de", "language:es", "language:fr", "language:ja", "language:ko", "language:zh", "license:cc-by-sa-4.0", "arxiv:2305.13303", "region:us" ]
2023-05-20T15:24:04+00:00
{"language_creators": ["machine-generated"], "language": ["en", "de", "es", "fr", "ja", "ko", "zh"], "license": "cc-by-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["token-classification"], "dataset_info": {"features": [{"name": "tokens_a", "sequence": "string"}, {"name": "tokens_b", "sequence": "string"}, {"name": "labels_a", "sequence": "float64"}, {"name": "labels_b", "sequence": "float64"}, {"name": "lang_a", "dtype": "string"}, {"name": "lang_b", "dtype": "string"}, {"name": "subset", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "alignments", "dtype": "string"}], "splits": [{"name": "train_en", "num_bytes": 1640900, "num_examples": 1506}, {"name": "train_de", "num_bytes": 1101404, "num_examples": 3012}, {"name": "train_es", "num_bytes": 1154765, "num_examples": 3012}, {"name": "train_fr", "num_bytes": 1206414, "num_examples": 3012}, {"name": "train_ja", "num_bytes": 838252, "num_examples": 3012}, {"name": "train_ko", "num_bytes": 829328, "num_examples": 3012}, {"name": "train_zh", "num_bytes": 796140, "num_examples": 3012}, {"name": "test_en", "num_bytes": 833900, "num_examples": 750}, {"name": "test_de", "num_bytes": 558624, "num_examples": 1500}, {"name": "test_es", "num_bytes": 580224, "num_examples": 1500}, {"name": "test_fr", "num_bytes": 610017, "num_examples": 1500}, {"name": "test_ja", "num_bytes": 425912, "num_examples": 1500}, {"name": "test_ko", "num_bytes": 424407, "num_examples": 1500}, {"name": "test_zh", "num_bytes": 403680, "num_examples": 1500}], "download_size": 2569205, "dataset_size": 11403967}}
2023-11-22T12:35:55+00:00
be44df7436d8725223014951f9a7030c62d15893
# Dataset Card for "celebrity-identities" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dassum/celebrity-identities
[ "region:us" ]
2023-05-20T15:43:03+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Lionel_Messi", "1": "Neymer_Jr", "2": "Rafael_Nadal", "3": "Roger_Federer", "4": "Zidane"}}}}], "splits": [{"name": "train", "num_bytes": 2867876.0, "num_examples": 15}], "download_size": 2856433, "dataset_size": 2867876.0}}
2023-05-20T15:43:08+00:00
cd5347d6bed90e6da57d8b75c449ecb996cf90dc
# Dataset Card for "chunk_131" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mask-distilled-one-sec-cv12/chunk_131
[ "region:us" ]
2023-05-20T15:46:50+00:00
{"dataset_info": {"features": [{"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 823325480, "num_examples": 161690}], "download_size": 837150081, "dataset_size": 823325480}}
2023-05-20T15:47:37+00:00