| column | type | min | max |
|---|---|---|---|
| sha | stringlengths | 40 | 40 |
| text | stringlengths | 1 | 13.4M |
| id | stringlengths | 2 | 117 |
| tags | listlengths | 1 | 7.91k |
| created_at | stringlengths | 25 | 25 |
| metadata | stringlengths | 2 | 875k |
| last_modified | stringlengths | 25 | 25 |
| arxiv | listlengths | 0 | 25 |
| languages | listlengths | 0 | 7.91k |
| tags_str | stringlengths | 17 | 159k |
| text_str | stringlengths | 1 | 447k |
| text_lists | listlengths | 0 | 352 |
| processed_texts | listlengths | 1 | 353 |
| tokens_length | listlengths | 1 | 353 |
| input_texts | listlengths | 1 | 40 |
3911beb4f771917c07ebd4d30ea383877bfa6637
# Dataset Card for "example_pairs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andersonbcdefg/example_pairs
[ "region:us" ]
2023-11-26T03:19:12+00:00
{"dataset_info": {"features": [{"name": "anchor", "dtype": "string"}, {"name": "positive", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1985788, "num_examples": 1000}], "download_size": 1150009, "dataset_size": 1985788}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T03:19:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "example_pairs" More Information needed
[ "# Dataset Card for \"example_pairs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"example_pairs\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"example_pairs\"\n\nMore Information needed" ]
1e2f41c62a3bdd6ef2591dfc976403fef709849c
https://huggingface.co/datasets/meta-math/MetaMathQA features: mathematics, single-turn, task length: 395k
Wanfq/metamathqa
[ "region:us" ]
2023-11-26T03:20:26+00:00
{}
2023-11-26T04:23:54+00:00
[]
[]
TAGS #region-us
URL features: mathematics, single-turn, task length: 395k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
bb3dc8b7aa88f51460b301a4523d2d180b6ea1fe
https://huggingface.co/datasets/HuggingFaceH4/no_robots features: general, multi-turn, task length: 9.5k
Wanfq/no_robots
[ "region:us" ]
2023-11-26T03:22:56+00:00
{}
2023-11-26T04:24:22+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, task length: 9.5k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
3293639fc5120a583bb686b558f44daba2199c44
https://huggingface.co/datasets/OpenAssistant/oasst_top1_2023-08-25 features: general, multi-turn, chat length: 12.9k
Wanfq/oasst_top1
[ "region:us" ]
2023-11-26T03:23:22+00:00
{}
2023-11-26T04:24:59+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, chat length: 12.9k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
5d5b0146f78f798658ed7d1a600beb6ab8676768
# Dataset Card for "example_triplet" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andersonbcdefg/example_triplet
[ "region:us" ]
2023-11-26T03:29:52+00:00
{"dataset_info": {"features": [{"name": "anchor", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 272226766, "num_examples": 160956}], "download_size": 174006237, "dataset_size": 272226766}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T03:31:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "example_triplet" More Information needed
[ "# Dataset Card for \"example_triplet\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"example_triplet\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"example_triplet\"\n\nMore Information needed" ]
ea5287a6f7ef0f92883a738fe922d687c49ed29c
https://huggingface.co/datasets/shahules786/orca-best features: general, single-turn, task length: 329k
Wanfq/orca_best
[ "region:us" ]
2023-11-26T03:38:02+00:00
{}
2023-11-26T04:25:38+00:00
[]
[]
TAGS #region-us
URL features: general, single-turn, task length: 329k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
242d33dce3e187d88242df8190a220206b06482f
https://huggingface.co/datasets/LDJnr/Pure-Dove features: general, multi-turn, chat length: 3.86k
Wanfq/pure_dove
[ "region:us" ]
2023-11-26T03:47:44+00:00
{}
2023-11-26T04:26:14+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, chat length: 3.86k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
89b7ae486afef53815ede44936b7f30ed4564b89
https://huggingface.co/datasets/ajibawa-2023/Python-Code-23k-ShareGPT features: coding, single-turn, task length: 22.6k
Wanfq/python_code
[ "region:us" ]
2023-11-26T03:49:43+00:00
{}
2023-11-26T04:26:45+00:00
[]
[]
TAGS #region-us
URL features: coding, single-turn, task length: 22.6k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
2d79f601658be171910aa70982e0d20a55c9ce3e
https://huggingface.co/datasets/shibing624/sharegpt_gpt4 features: general, multi-turn, chat length: 6.21k
Wanfq/sharegpt_gpt4
[ "region:us" ]
2023-11-26T03:50:33+00:00
{}
2023-11-26T04:27:21+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, chat length: 6.21k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
2d3e2c2b097aebaa96e598225e839cef20a6b089
https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k features: general, multi-turn, chat length: 208k
Wanfq/ultrachat_200k
[ "region:us" ]
2023-11-26T03:51:58+00:00
{}
2023-11-26T04:27:53+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, chat length: 208k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
1a5ce6d5027a67c2fc027b087496ebe50ca30d6d
# Datasets for Lost In The Middle This repository contains datasets used in the paper ["Lost in the Middle: How Language Models Use Long Contexts"](https://arxiv.org/abs/2307.03172), focusing on multi-document question answering and key-value retrieval tasks. ## Datasets Overview The datasets provided are as follows: - **Key-Value Retrieval Datasets** - `kv75`: Key-Value pairs with 75 keys. - `kv140`: Key-Value pairs with 140 keys. - `kv300`: Key-Value pairs with 300 keys. - **Multi-Document Question Answering Datasets** - `qa10`: Questions with answers derived from 10 documents. - `qa20`: Questions with answers derived from 20 documents. - `qa30`: Questions with answers derived from 30 documents. ## Loading the Data You can load these datasets using the Hugging Face `datasets` library: ```python from datasets import load_dataset ### Example for loading the kv75 dataset dataset = load_dataset("bzantium/LITM", "kv75") ### Example for loading the qa20 dataset dataset = load_dataset("bzantium/LITM", "qa20") ```
bzantium/LITM
[ "task_categories:question-answering", "size_categories:n<1K", "license:apache-2.0", "lost-in-the-middle", "arxiv:2307.03172", "region:us" ]
2023-11-26T03:54:15+00:00
{"license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["question-answering"], "configs": [{"config_name": "kv75", "data_files": [{"split": "test", "path": "data/kv75.jsonl"}]}, {"config_name": "kv140", "data_files": [{"split": "test", "path": "data/kv140.jsonl"}]}, {"config_name": "kv300", "data_files": [{"split": "test", "path": "data/kv300.jsonl"}]}, {"config_name": "qa10", "data_files": [{"split": "test", "path": "data/qa10.jsonl"}]}, {"config_name": "qa20", "data_files": [{"split": "test", "path": "data/qa20.jsonl"}]}, {"config_name": "qa30", "data_files": [{"split": "test", "path": "data/qa30.jsonl"}]}], "tags": ["lost-in-the-middle"]}
2023-12-12T01:46:34+00:00
[ "2307.03172" ]
[]
TAGS #task_categories-question-answering #size_categories-n<1K #license-apache-2.0 #lost-in-the-middle #arxiv-2307.03172 #region-us
# Datasets for Lost In The Middle This repository contains datasets used in the paper "Lost in the Middle: How Language Models Use Long Contexts", focusing on multi-document question answering and key-value retrieval tasks. ## Datasets Overview The datasets provided are as follows: - Key-Value Retrieval Datasets - 'kv75': Key-Value pairs with 75 keys. - 'kv140': Key-Value pairs with 140 keys. - 'kv300': Key-Value pairs with 300 keys. - Multi-Document Question Answering Datasets - 'qa10': Questions with answers derived from 10 documents. - 'qa20': Questions with answers derived from 20 documents. - 'qa30': Questions with answers derived from 30 documents. ## Loading the Data You can load these datasets using the Hugging Face 'datasets' library:
[ "# Datasets for Lost In The Middle\n\nThis repository contains datasets used in the paper \"Lost in the Middle: How Language Models Use Long Contexts\", focusing on multi-document question answering and key-value retrieval tasks.", "## Datasets Overview\n\nThe datasets provided are as follows:\n\n- Key-Value Retrieval Datasets\n - 'kv75': Key-Value pairs with 75 keys.\n - 'kv140': Key-Value pairs with 140 keys.\n - 'kv300': Key-Value pairs with 300 keys.\n\n- Multi-Document Question Answering Datasets\n - 'qa10': Questions with answers derived from 10 documents.\n - 'qa20': Questions with answers derived from 20 documents.\n - 'qa30': Questions with answers derived from 30 documents.", "## Loading the Data\n\nYou can load these datasets using the Hugging Face 'datasets' library:" ]
[ "TAGS\n#task_categories-question-answering #size_categories-n<1K #license-apache-2.0 #lost-in-the-middle #arxiv-2307.03172 #region-us \n", "# Datasets for Lost In The Middle\n\nThis repository contains datasets used in the paper \"Lost in the Middle: How Language Models Use Long Contexts\", focusing on multi-document question answering and key-value retrieval tasks.", "## Datasets Overview\n\nThe datasets provided are as follows:\n\n- Key-Value Retrieval Datasets\n - 'kv75': Key-Value pairs with 75 keys.\n - 'kv140': Key-Value pairs with 140 keys.\n - 'kv300': Key-Value pairs with 300 keys.\n\n- Multi-Document Question Answering Datasets\n - 'qa10': Questions with answers derived from 10 documents.\n - 'qa20': Questions with answers derived from 20 documents.\n - 'qa30': Questions with answers derived from 30 documents.", "## Loading the Data\n\nYou can load these datasets using the Hugging Face 'datasets' library:" ]
[ 53, 60, 139, 24 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-n<1K #license-apache-2.0 #lost-in-the-middle #arxiv-2307.03172 #region-us \n# Datasets for Lost In The Middle\n\nThis repository contains datasets used in the paper \"Lost in the Middle: How Language Models Use Long Contexts\", focusing on multi-document question answering and key-value retrieval tasks.## Datasets Overview\n\nThe datasets provided are as follows:\n\n- Key-Value Retrieval Datasets\n - 'kv75': Key-Value pairs with 75 keys.\n - 'kv140': Key-Value pairs with 140 keys.\n - 'kv300': Key-Value pairs with 300 keys.\n\n- Multi-Document Question Answering Datasets\n - 'qa10': Questions with answers derived from 10 documents.\n - 'qa20': Questions with answers derived from 20 documents.\n - 'qa30': Questions with answers derived from 30 documents.## Loading the Data\n\nYou can load these datasets using the Hugging Face 'datasets' library:" ]
f22a4658d111c25f620b7b1dc8af21afa8588e52
https://huggingface.co/datasets/LDJnr/Verified-Camel features: general, single-turn, task length: 0.127k
Wanfq/verified_camel
[ "region:us" ]
2023-11-26T03:58:23+00:00
{}
2023-11-26T04:28:24+00:00
[]
[]
TAGS #region-us
URL features: general, single-turn, task length: 0.127k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
f62903c76584bc6e940cbc1f664ee0b2fa948717
https://huggingface.co/datasets/theblackcat102/evol-codealpaca-v1 features: coding, single-turn, task (only Python code selected) length: 57.4k
Wanfq/wizardcoder
[ "region:us" ]
2023-11-26T03:58:44+00:00
{}
2024-01-19T15:51:36+00:00
[]
[]
TAGS #region-us
URL features: coding, single-turn, task (only Python code selected) length: 57.4k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
36ef631e35bdec0e4aab03d51c9172c756adac9c
https://huggingface.co/datasets/ehartford/WizardLM_evol_instruct_V2_196k_unfiltered_merged_split features: general, multi-turn, chat length: 154k
Wanfq/wizardlm
[ "region:us" ]
2023-11-26T04:01:23+00:00
{}
2023-11-26T04:29:27+00:00
[]
[]
TAGS #region-us
URL features: general, multi-turn, chat length: 154k
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
b265589295b9e1a1821790375844be80befc3bf8
* Update on December 4, 2023: Improved the format of answers: all answers are now required to provide the referenced original text first. * English multi-document Q&A data created using RedPajamaCommonCrawl data as reference text * In the raw dataset, each sample contains <font color=red> one reference document, 199 irrelevant documents, and a Q-A pair based on the reference document</font>. It can be used to train models to extract the target information from a large number of documents. * After filtering, integrating, and transforming the raw data into chatml format instruction fine-tuning data, each sample contains approximately 30 reference documents and 5 corresponding QA pairs. * dataset size: 11k
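A minimal sketch of loading this data with the Hugging Face `datasets` library; the split name and record fields are assumptions here, so inspect the first record to confirm the actual schema.

```python
from datasets import load_dataset

# Load the multi-document QA data (the split name "train" is an assumption).
ds = load_dataset("yuyijiong/Multi-doc-QA-CommonCrawl", split="train")

# Each record should hold the chatml-style conversation with roughly 30 reference
# documents and 5 QA pairs; print the keys of one record to see the real field names.
example = ds[0]
print(example.keys())
```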
yuyijiong/Multi-doc-QA-CommonCrawl
[ "size_categories:10K<n<100K", "language:en", "license:cc-by-nc-4.0", "region:us" ]
2023-11-26T04:04:20+00:00
{"language": ["en"], "license": "cc-by-nc-4.0", "size_categories": ["10K<n<100K"]}
2023-12-04T11:37:12+00:00
[]
[ "en" ]
TAGS #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #region-us
* Update on December 4, 2023: Improved the format of answers: all answers are now required to provide the referenced original text first. * English multi-document Q&A data created using RedPajamaCommonCrawl data as reference text * In the raw dataset, each sample contains <font color=red> one reference document, 199 irrelevant documents, and a Q-A pair based on the reference document</font>. It can be used to train models to extract the target information from a large number of documents. * After filtering, integrating, and transforming the raw data into chatml format instruction fine-tuning data, each sample contains approximately 30 reference documents and 5 corresponding QA pairs. * dataset size: 11k
[]
[ "TAGS\n#size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #region-us \n" ]
[ 33 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #region-us \n" ]
8c68fbb3023d2d8dfbedac2a807c92adc51afd7a
A dataset of all audio files uploaded to LibriVox before 26th September 2023.
pykeio/librivox-tracks
[ "task_categories:text-to-speech", "task_categories:automatic-speech-recognition", "size_categories:100K<n<1M", "language:ace", "language:afr", "language:grc", "language:ara", "language:ban", "language:bel", "language:ben", "language:ceb", "language:bug", "language:bul", "language:yue", "language:cat", "language:zho", "language:hrv", "language:ces", "language:dan", "language:nld", "language:eng", "language:epo", "language:fao", "language:fin", "language:fra", "language:glg", "language:deu", "language:ell", "language:heb", "language:hin", "language:hun", "language:ind", "language:gle", "language:ita", "language:jpn", "language:jav", "language:pam", "language:kor", "language:kur", "language:lat", "language:lav", "language:lit", "language:nds", "language:ltz", "language:mkd", "language:zlm", "language:mlt", "language:mri", "language:mar", "language:enm", "language:min", "language:nor", "language:nno", "language:oci", "language:ang", "language:kaw", "language:non", "language:osn", "language:tpw", "language:ori", "language:fas", "language:pol", "language:por", "language:ron", "language:rus", "language:gla", "language:srp", "language:slk", "language:slv", "language:spa", "language:sun", "language:swe", "language:tgl", "language:tam", "language:tur", "language:ukr", "language:urd", "language:vol", "language:wln", "language:cym", "language:fry", "language:yid", "license:cc-by-4.0", "region:us" ]
2023-11-26T04:21:02+00:00
{"language": ["ace", "afr", "grc", "ara", "ban", "bel", "ben", "ceb", "bug", "bul", "yue", "cat", "zho", "hrv", "ces", "dan", "nld", "eng", "epo", "fao", "fin", "fra", "glg", "deu", "ell", "heb", "hin", "hun", "ind", "gle", "ita", "jpn", "jav", "pam", "kor", "kur", "lat", "lav", "lit", "nds", "ltz", "mkd", "zlm", "mlt", "mri", "mar", "enm", "min", "nor", "nno", "oci", "ang", "kaw", "non", "osn", "tpw", "ori", "fas", "pol", "por", "ron", "rus", "gla", "srp", "slk", "slv", "spa", "sun", "swe", "tgl", "tam", "tur", "ukr", "urd", "vol", "wln", "cym", "fry", "yid"], "license": "cc-by-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-to-speech", "automatic-speech-recognition"], "pretty_name": "LibriVox Tracks"}
2024-01-04T14:14:23+00:00
[]
[ "ace", "afr", "grc", "ara", "ban", "bel", "ben", "ceb", "bug", "bul", "yue", "cat", "zho", "hrv", "ces", "dan", "nld", "eng", "epo", "fao", "fin", "fra", "glg", "deu", "ell", "heb", "hin", "hun", "ind", "gle", "ita", "jpn", "jav", "pam", "kor", "kur", "lat", "lav", "lit", "nds", "ltz", "mkd", "zlm", "mlt", "mri", "mar", "enm", "min", "nor", "nno", "oci", "ang", "kaw", "non", "osn", "tpw", "ori", "fas", "pol", "por", "ron", "rus", "gla", "srp", "slk", "slv", "spa", "sun", "swe", "tgl", "tam", "tur", "ukr", "urd", "vol", "wln", "cym", "fry", "yid" ]
TAGS #task_categories-text-to-speech #task_categories-automatic-speech-recognition #size_categories-100K<n<1M #language-Achinese #language-Afrikaans #language-Ancient Greek (to 1453) #language-Arabic #language-Balinese #language-Belarusian #language-Bengali #language-Cebuano #language-Buginese #language-Bulgarian #language-Yue Chinese #language-Catalan #language-Chinese #language-Croatian #language-Czech #language-Danish #language-Dutch #language-English #language-Esperanto #language-Faroese #language-Finnish #language-French #language-Galician #language-German #language-Modern Greek (1453-) #language-Hebrew #language-Hindi #language-Hungarian #language-Indonesian #language-Irish #language-Italian #language-Japanese #language-Javanese #language-Pampanga #language-Korean #language-Kurdish #language-Latin #language-Latvian #language-Lithuanian #language-Low German #language-Luxembourgish #language-Macedonian #language-Malay (individual language) #language-Maltese #language-Maori #language-Marathi #language-Middle English (1100-1500) #language-Minangkabau #language-Norwegian #language-Norwegian Nynorsk #language-Occitan (post 1500) #language-Old English (ca. 450-1100) #language-Kawi #language-Old Norse #language-Old Sundanese #language-tpw #language-Oriya (macrolanguage) #language-Persian #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Scottish Gaelic #language-Serbian #language-Slovak #language-Slovenian #language-Spanish #language-Sundanese #language-Swedish #language-Tagalog #language-Tamil #language-Turkish #language-Ukrainian #language-Urdu #language-Volapük #language-Walloon #language-Welsh #language-Western Frisian #language-Yiddish #license-cc-by-4.0 #region-us
A dataset of all audio files uploaded to LibriVox before 26th September 2023.
[]
[ "TAGS\n#task_categories-text-to-speech #task_categories-automatic-speech-recognition #size_categories-100K<n<1M #language-Achinese #language-Afrikaans #language-Ancient Greek (to 1453) #language-Arabic #language-Balinese #language-Belarusian #language-Bengali #language-Cebuano #language-Buginese #language-Bulgarian #language-Yue Chinese #language-Catalan #language-Chinese #language-Croatian #language-Czech #language-Danish #language-Dutch #language-English #language-Esperanto #language-Faroese #language-Finnish #language-French #language-Galician #language-German #language-Modern Greek (1453-) #language-Hebrew #language-Hindi #language-Hungarian #language-Indonesian #language-Irish #language-Italian #language-Japanese #language-Javanese #language-Pampanga #language-Korean #language-Kurdish #language-Latin #language-Latvian #language-Lithuanian #language-Low German #language-Luxembourgish #language-Macedonian #language-Malay (individual language) #language-Maltese #language-Maori #language-Marathi #language-Middle English (1100-1500) #language-Minangkabau #language-Norwegian #language-Norwegian Nynorsk #language-Occitan (post 1500) #language-Old English (ca. 450-1100) #language-Kawi #language-Old Norse #language-Old Sundanese #language-tpw #language-Oriya (macrolanguage) #language-Persian #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Scottish Gaelic #language-Serbian #language-Slovak #language-Slovenian #language-Spanish #language-Sundanese #language-Swedish #language-Tagalog #language-Tamil #language-Turkish #language-Ukrainian #language-Urdu #language-Volapük #language-Walloon #language-Welsh #language-Western Frisian #language-Yiddish #license-cc-by-4.0 #region-us \n" ]
[ 538 ]
[ "passage: " ]
e635d3e681034228a91afb5e1891d18da026f809
# Dataset Card for "colorized_people-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
annyorange/colorized_people-dataset
[ "region:us" ]
2023-11-26T04:30:46+00:00
{"dataset_info": {"features": [{"name": "original_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "colorized_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 35880418.0, "num_examples": 766}], "download_size": 35928923, "dataset_size": 35880418.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T05:40:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "colorized_people-dataset" More Information needed
[ "# Dataset Card for \"colorized_people-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"colorized_people-dataset\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"colorized_people-dataset\"\n\nMore Information needed" ]
0ecabfa59db5e6853afd5b1832ae07c0ee98e7be
# Dataset Card for "sem_eval_2018_task_1_english_cleaned_labels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vibhorag101/sem_eval_2018_task_1_english_cleaned_labels
[ "region:us" ]
2023-11-26T05:09:18+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "ID", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "anger", "dtype": "bool"}, {"name": "anticipation", "dtype": "bool"}, {"name": "disgust", "dtype": "bool"}, {"name": "fear", "dtype": "bool"}, {"name": "joy", "dtype": "bool"}, {"name": "love", "dtype": "bool"}, {"name": "optimism", "dtype": "bool"}, {"name": "pessimism", "dtype": "bool"}, {"name": "sadness", "dtype": "bool"}, {"name": "surprise", "dtype": "bool"}, {"name": "trust", "dtype": "bool"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1219126, "num_examples": 6838}, {"name": "test", "num_bytes": 578728, "num_examples": 3259}, {"name": "validation", "num_bytes": 157658, "num_examples": 886}], "download_size": 706992, "dataset_size": 1955512}}
2023-11-26T05:09:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sem_eval_2018_task_1_english_cleaned_labels" More Information needed
[ "# Dataset Card for \"sem_eval_2018_task_1_english_cleaned_labels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sem_eval_2018_task_1_english_cleaned_labels\"\n\nMore Information needed" ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sem_eval_2018_task_1_english_cleaned_labels\"\n\nMore Information needed" ]
91ee1e1f9a842129640583dfebfef94580bc05ec
The original source dataset is CSpider. This dataset was created following [b-mc2/sql-create-context](https://huggingface.co/datasets/b-mc2/sql-create-context).
jtjt520j/CSpider_sql_create_context
[ "region:us" ]
2023-11-26T05:12:51+00:00
{}
2023-11-26T05:56:31+00:00
[]
[]
TAGS #region-us
The original source dataset is CSpider. This dataset was created following b-mc2/sql-create-context.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
7c97b4596c324a2957437506aaa53df9cb2002c9
The original source data is a merge of CSpider and DUSQL. This dataset was created following [b-mc2/sql-create-context](https://huggingface.co/datasets/b-mc2/sql-create-context).
jtjt520j/CSpider_and_DUSQL_sql_create_context
[ "region:us" ]
2023-11-26T05:34:50+00:00
{}
2023-11-26T05:55:35+00:00
[]
[]
TAGS #region-us
The original source data is a merge of CSpider and DUSQL. This dataset was created following b-mc2/sql-create-context.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
3a901c4995ec6dd61c3aac39a6d4b6654a0955d6
# Dataset Card for "mimic_tokenized_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
passionMan/mimic_tokenized_dataset
[ "region:us" ]
2023-11-26T06:13:35+00:00
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 4677119610, "num_examples": 1573489}, {"name": "test", "num_bytes": 1559053544, "num_examples": 524497}], "download_size": 857272768, "dataset_size": 6236173154}}
2023-11-26T16:11:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mimic_tokenized_dataset" More Information needed
[ "# Dataset Card for \"mimic_tokenized_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mimic_tokenized_dataset\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mimic_tokenized_dataset\"\n\nMore Information needed" ]
adebde86a55122e8b53210b349d1d732a1790415
Backdoor training process. Dataset: MNIST, architecture: ResNet18. # Dataset Dir ``` data(input path) │ index.json(optional, for noisy model) │ new_labels.json(optional, for noisy model) │ old_labels.json(optional, for noisy model) | └─── sprites │ │ 0.png │ │ 1.png │ │ ... │ └───Model │ │ model.py │ │ │ └───Epoch_1 │ │ index.json │ │ subject_model.pth | | (train_data.npy) [after preprocess] | | (test_data.npy) [after preprocess] | | (border_centers.npy) [after preprocess] | | (vismodel.pth) [after training] | | (embedding.npy) [after visualization] | | (scale.npy) [after visualization] | | (bgimg.png) [after visualization] │ └───Epoch_2 | | ... │ └───Training_data | │ training_dataset_data.pth | │ training_dataset_label.pth │ └───Testing_data │ │ testing_dataset_data.pth │ │ testing_dataset_label.pth └───config.json ```
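A minimal sketch, assuming PyTorch and the layout above, of reading the per-epoch files; the exact nesting of the `Epoch_N` directories under `Model/` and the contents of `subject_model.pth` are assumptions based on the tree.

```python
import json
import os

import torch

DATA_ROOT = "data"  # the input path shown in the tree above


def load_epoch(epoch: int):
    """Read the sample index and the ResNet18 subject-model checkpoint for one epoch."""
    epoch_dir = os.path.join(DATA_ROOT, "Model", f"Epoch_{epoch}")
    with open(os.path.join(epoch_dir, "index.json")) as f:
        index = json.load(f)
    # torch.load returns whatever was saved (typically a state_dict for subject_model.pth)
    checkpoint = torch.load(os.path.join(epoch_dir, "subject_model.pth"), map_location="cpu")
    return index, checkpoint


index, checkpoint = load_epoch(1)
print(type(index), type(checkpoint))
```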
yvonne1123/training_dynamic
[ "region:us" ]
2023-11-26T07:12:43+00:00
{}
2024-02-05T04:15:16+00:00
[]
[]
TAGS #region-us
Backdoor training process. Dataset: MNIST, architecture: ResNet18. # Dataset Dir
[ "# Dataset Dir" ]
[ "TAGS\n#region-us \n", "# Dataset Dir" ]
[ 6, 4 ]
[ "passage: TAGS\n#region-us \n# Dataset Dir" ]
fb69fe0085c89aea889cac7648aa40caf1649a1d
<s>[INST] What career options are available in the field of computer science? [/INST] Computer science offers various paths like software development, data science, and cybersecurity.</s> <s>[INST] I enjoy helping people. What are some rewarding careers in healthcare? [/INST] Consider careers like nursing, physician assistant, or social work for a fulfilling healthcare role.</s> <s>[INST] How can I pursue a career in digital marketing? [/INST] Start by gaining digital marketing skills, and explore roles like social media manager or SEO specialist.</s> <s>[INST] What are the different finance careers I can consider? [/INST] Finance offers roles such as financial analyst, investment banker, or financial planner depending on your interests.</s> <s>[INST] What steps should I take to become an entrepreneur? [/INST] Begin by identifying a business idea, creating a business plan, and seeking mentorship to navigate the entrepreneurial journey.</s> <s>[INST] How can I contribute to environmental sustainability in my career? [/INST] Explore careers in environmental science, renewable energy, or sustainable development to make a positive impact.</s> <s>[INST] I love teaching. What are some alternative careers in education? [/INST] Consider roles like instructional design, educational consulting, or corporate training for alternative paths in education.</s> <s>[INST] What career opportunities exist for artists and designers? [/INST] Explore careers such as graphic design, UX/UI design, or freelance artist depending on your artistic strengths and interests.</s> <s>[INST] I enjoy problem-solving. What engineering field should I pursue? [/INST] Engineering fields like civil engineering, electrical engineering, or software engineering involve significant problem-solving.</s> <s>[INST] How can I start a career in psychology? [/INST] Pursue degrees in psychology, gain practical experience through internships, and consider careers like counseling or clinical psychology.</s> <s>[INST] Who is Waleed Randhawa? [/INST] Waleed Randhawa is a mathematics-computer science major at the University of California San Diego and is currently interning at a venture capitalist firm. He is passionate about solving real-world problems using his knowledge and skills.</s> <s>[INST] What are the key skills needed for a successful project manager? [/INST] Successful project managers need skills such as leadership, communication, time management, and problem-solving.</s> <s>[INST] I'm interested in a career that involves travel. Any suggestions? [/INST] Consider careers like international business, consulting, or being a travel journalist for opportunities that involve frequent travel.</s> <s>[INST] How can I transition from a technical role to a managerial position? [/INST] Develop leadership and communication skills, seek mentorship, and consider management training programs to facilitate the transition.</s> <s>[INST] What careers combine technology and healthcare? [/INST] Explore careers in health informatics, medical technology, or bioinformatics for roles that integrate technology and healthcare.</s> <s>[INST] I'm good with numbers. What finance careers don't require a finance degree? [/INST] Roles like financial analyst, budget analyst, or investment banking associate may not strictly require a finance degree but benefit from strong analytical skills.</s> <s>[INST] How can I enter the field of artificial intelligence? [/INST] Gain expertise in machine learning, deep learning, and programming languages like Python. 
Consider roles like machine learning engineer or data scientist in AI.</s> <s>[INST] What are the potential career paths for someone with a sociology degree? [/INST] Careers in social work, human resources, or market research are common paths for individuals with a sociology degree.</s> <s>[INST] I'm interested in working with cutting-edge technology. What are some emerging tech careers? [/INST] Explore careers in quantum computing, artificial intelligence ethics, or blockchain development for roles at the forefront of technology.</s> <s>[INST] Can you suggest alternative careers for someone with a law degree? [/INST] Consider roles like legal consulting, compliance, or policy analysis for alternative paths with a law degree outside traditional law practice.</s> <s>[INST] What are the career prospects for someone with a degree in environmental engineering? [/INST] Careers in environmental consulting, renewable energy, or water resources management are potential paths for environmental engineering graduates.</s> <s>[INST] I love playing sports. What careers involve sports that I can aim for? [/INST] Consider careers like professional athlete, sports coach, sports journalism, sports management, or sports medicine for a future in the sports industry.</s> <s>[INST] I have a background in literature. What careers can I pursue? [/INST] Consider careers in writing, editing, publishing, content creation, or even teaching with a literature background.</s> <s>[INST] How can I build a career in the tech industry without a technical degree? [/INST] Explore roles in technical writing, product management, or user experience (UX) design, which may not require a technical degree.</s> <s>[INST] What are the career prospects for someone with a degree in public health? [/INST] Careers in public health include roles in epidemiology, health education, or public health administration, addressing community well-being.</s> <s>[INST] I'm interested in a career that involves both business and creativity. Any suggestions? [/INST] One of the best roles that combines both business and creativity is business analyst. Business analysts process information and can suggest how to improve a company's operations through a multitude of different techniques which require creativity. </s>
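The samples above use the Llama-2 style chat template (`<s>[INST] ... [/INST] ... </s>`). Below is a minimal sketch of splitting such a string back into (question, answer) pairs with a regular expression; the snippet is illustrative and not part of the dataset itself.

```python
import re

raw = (
    "<s>[INST] What career options are available in the field of computer science? [/INST] "
    "Computer science offers various paths like software development, data science, and cybersecurity.</s> "
    "<s>[INST] How can I pursue a career in digital marketing? [/INST] "
    "Start by gaining digital marketing skills, and explore roles like social media manager or SEO specialist.</s>"
)

# Capture the instruction between [INST] and [/INST], then the response up to </s>.
pattern = re.compile(r"<s>\[INST\]\s*(.*?)\s*\[/INST\]\s*(.*?)\s*</s>", re.DOTALL)

for question, answer in pattern.findall(raw):
    print(question, "->", answer)
```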
wrandhawa/test
[ "region:us" ]
2023-11-26T07:29:16+00:00
{}
2023-11-29T06:24:53+00:00
[]
[]
TAGS #region-us
<s>[INST] What career options are available in the field of computer science? [/INST] Computer science offers various paths like software development, data science, and cybersecurity.</s> <s>[INST] I enjoy helping people. What are some rewarding careers in healthcare? [/INST] Consider careers like nursing, physician assistant, or social work for a fulfilling healthcare role.</s> <s>[INST] How can I pursue a career in digital marketing? [/INST] Start by gaining digital marketing skills, and explore roles like social media manager or SEO specialist.</s> <s>[INST] What are the different finance careers I can consider? [/INST] Finance offers roles such as financial analyst, investment banker, or financial planner depending on your interests.</s> <s>[INST] What steps should I take to become an entrepreneur? [/INST] Begin by identifying a business idea, creating a business plan, and seeking mentorship to navigate the entrepreneurial journey.</s> <s>[INST] How can I contribute to environmental sustainability in my career? [/INST] Explore careers in environmental science, renewable energy, or sustainable development to make a positive impact.</s> <s>[INST] I love teaching. What are some alternative careers in education? [/INST] Consider roles like instructional design, educational consulting, or corporate training for alternative paths in education.</s> <s>[INST] What career opportunities exist for artists and designers? [/INST] Explore careers such as graphic design, UX/UI design, or freelance artist depending on your artistic strengths and interests.</s> <s>[INST] I enjoy problem-solving. What engineering field should I pursue? [/INST] Engineering fields like civil engineering, electrical engineering, or software engineering involve significant problem-solving.</s> <s>[INST] How can I start a career in psychology? [/INST] Pursue degrees in psychology, gain practical experience through internships, and consider careers like counseling or clinical psychology.</s> <s>[INST] Who is Waleed Randhawa? [/INST] Waleed Randhawa is a mathematics-computer science major at the University of California San Diego and is currently interning at a venture capitalist firm. He is passionate about solving real-world problems using his knowledge and skills.</s> <s>[INST] What are the key skills needed for a successful project manager? [/INST] Successful project managers need skills such as leadership, communication, time management, and problem-solving.</s> <s>[INST] I'm interested in a career that involves travel. Any suggestions? [/INST] Consider careers like international business, consulting, or being a travel journalist for opportunities that involve frequent travel.</s> <s>[INST] How can I transition from a technical role to a managerial position? [/INST] Develop leadership and communication skills, seek mentorship, and consider management training programs to facilitate the transition.</s> <s>[INST] What careers combine technology and healthcare? [/INST] Explore careers in health informatics, medical technology, or bioinformatics for roles that integrate technology and healthcare.</s> <s>[INST] I'm good with numbers. What finance careers don't require a finance degree? [/INST] Roles like financial analyst, budget analyst, or investment banking associate may not strictly require a finance degree but benefit from strong analytical skills.</s> <s>[INST] How can I enter the field of artificial intelligence? [/INST] Gain expertise in machine learning, deep learning, and programming languages like Python. 
Consider roles like machine learning engineer or data scientist in AI.</s> <s>[INST] What are the potential career paths for someone with a sociology degree? [/INST] Careers in social work, human resources, or market research are common paths for individuals with a sociology degree.</s> <s>[INST] I'm interested in working with cutting-edge technology. What are some emerging tech careers? [/INST] Explore careers in quantum computing, artificial intelligence ethics, or blockchain development for roles at the forefront of technology.</s> <s>[INST] Can you suggest alternative careers for someone with a law degree? [/INST] Consider roles like legal consulting, compliance, or policy analysis for alternative paths with a law degree outside traditional law practice.</s> <s>[INST] What are the career prospects for someone with a degree in environmental engineering? [/INST] Careers in environmental consulting, renewable energy, or water resources management are potential paths for environmental engineering graduates.</s> <s>[INST] I love playing sports. What careers involve sports that I can aim for? [/INST] Consider careers like professional athlete, sports coach, sports journalism, sports management, or sports medicine for a future in the sports industry.</s> <s>[INST] I have a background in literature. What careers can I pursue? [/INST] Consider careers in writing, editing, publishing, content creation, or even teaching with a literature background.</s> <s>[INST] How can I build a career in the tech industry without a technical degree? [/INST] Explore roles in technical writing, product management, or user experience (UX) design, which may not require a technical degree.</s> <s>[INST] What are the career prospects for someone with a degree in public health? [/INST] Careers in public health include roles in epidemiology, health education, or public health administration, addressing community well-being.</s> <s>[INST] I'm interested in a career that involves both business and creativity. Any suggestions? [/INST] One of the best roles that combines both business and creativity is business analyst. Business analysts process information and can suggest how to improve a company's operations through a multitude of different techniques which require creativity. </s>
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
2e464426e156adc42b869a12f35d60be6b2616c7
Subset of [cerebras/SlimPajama-627B](https://huggingface.co/datasets/cerebras/SlimPajama-627B), consisting of 10% of the `train` split and 100% of the `test` and `validation` splits. The `train` split consists of `chunk2` from the original [cerebras/SlimPajama-627B] dataset, split into five zstd-compressed jsonl files for efficient loading. The dataset is 70 GB compressed, 249 GB uncompressed. --- ``` @misc{cerebras2023slimpajama, author = {Soboleva, Daria and Al-Khateeb, Faisal and Myers, Robert and Steeves, Jacob R and Hestness, Joel and Dey, Nolan}, title = {{SlimPajama: A 627B token cleaned and deduplicated version of RedPajama}}, month = June, year = 2023, howpublished = {\url{https://www.cerebras.net/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama}}, url = {https://huggingface.co/datasets/cerebras/SlimPajama-627B}, } ```
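A minimal sketch of reading this subset with the `datasets` library; streaming is used so the ~70 GB of compressed jsonl is not downloaded up front. The split name and the `text` field follow the parent SlimPajama-627B layout and are assumptions here.

```python
from datasets import load_dataset

# Stream the train split instead of materializing ~249 GB of uncompressed text on disk.
ds = load_dataset("venketh/SlimPajama-62B", split="train", streaming=True)

# Inspect the first record; the "text" field name is assumed from the parent dataset.
first = next(iter(ds))
print(list(first.keys()))
print(first.get("text", "")[:200])
```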
venketh/SlimPajama-62B
[ "license:apache-2.0", "region:us" ]
2023-11-26T07:39:25+00:00
{"license": "apache-2.0"}
2023-11-30T15:24:44+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
Subset of cerebras/SlimPajama-627B, consisting of 10% of the 'train' split and 100% of the 'test' and 'validation' splits. The 'train' split consists of 'chunk2' from the original [cerebras/SlimPajama-627B] dataset, split into five zstd-compressed jsonl files for efficient loading. The dataset is 70 GB compressed, 249 GB uncompressed. ---
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
3eab8853079bc6b0a86d2e99ad19d57f1e953018
# Recognize Anything Dataset Card ## Dataset details **Dataset type:** These annotation files come from the Recognize Anything Model (RAM). RAM proposes an automatic data engine to generate substantial image tags from image-text pairs. **Dataset date:** Recognize Anything Dataset was collected in April 2023 by an automatic data engine proposed by RAM. **Paper or resources for more information:** https://github.com/xinyu1205/recognize-anything **Where to send questions or comments about the model:** https://github.com/xinyu1205/recognize-anything/issues ## Intended use **Primary intended uses:** The primary use of Recognize Anything is research on fundamental image recognition models. **Primary intended users:** The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence.
xinyu1205/recognize-anything-dataset
[ "task_categories:zero-shot-classification", "size_categories:10M<n<100M", "language:en", "license:apache-2.0", "image recognition", "region:us" ]
2023-11-26T07:39:53+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10M<n<100M"], "task_categories": ["zero-shot-classification"], "tags": ["image recognition"]}
2023-11-28T09:03:12+00:00
[]
[ "en" ]
TAGS #task_categories-zero-shot-classification #size_categories-10M<n<100M #language-English #license-apache-2.0 #image recognition #region-us
# Recognize Anything Dataset Card ## Dataset details Dataset type: These annotation files come from the Recognize Anything Model (RAM). RAM proposes an automatic data engine to generate substantial image tags from image-text pairs. Dataset date: Recognize Anything Dataset was collected in April 2023 by an automatic data engine proposed by RAM. Paper or resources for more information: URL Where to send questions or comments about the model: URL ## Intended use Primary intended uses: The primary use of Recognize Anything is research on fundamental image recognition models. Primary intended users: The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence.
[ "# Recognize Anything Dataset Card", "## Dataset details\n\nDataset type:\nThese annotation files come from the Recognize Anything Model (RAM). RAM propose an automatic data engine to generate substantial image tags from image-text pairs.\n\nDataset date:\nRecognize Anything Dataset was collected in April 2023, by an automatic data engine proposed by RAM.\n\nPaper or resources for more information:\nURL\n\nWhere to send questions or comments about the model:\nURL", "## Intended use\nPrimary intended uses:\nThe primary use of Recognize Anything is research on fundamental image recognition models.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence." ]
[ "TAGS\n#task_categories-zero-shot-classification #size_categories-10M<n<100M #language-English #license-apache-2.0 #image recognition #region-us \n", "# Recognize Anything Dataset Card", "## Dataset details\n\nDataset type:\nThese annotation files come from the Recognize Anything Model (RAM). RAM propose an automatic data engine to generate substantial image tags from image-text pairs.\n\nDataset date:\nRecognize Anything Dataset was collected in April 2023, by an automatic data engine proposed by RAM.\n\nPaper or resources for more information:\nURL\n\nWhere to send questions or comments about the model:\nURL", "## Intended use\nPrimary intended uses:\nThe primary use of Recognize Anything is research on fundamental image recognition models.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence." ]
[ 46, 9, 89, 63 ]
[ "passage: TAGS\n#task_categories-zero-shot-classification #size_categories-10M<n<100M #language-English #license-apache-2.0 #image recognition #region-us \n# Recognize Anything Dataset Card## Dataset details\n\nDataset type:\nThese annotation files come from the Recognize Anything Model (RAM). RAM propose an automatic data engine to generate substantial image tags from image-text pairs.\n\nDataset date:\nRecognize Anything Dataset was collected in April 2023, by an automatic data engine proposed by RAM.\n\nPaper or resources for more information:\nURL\n\nWhere to send questions or comments about the model:\nURL## Intended use\nPrimary intended uses:\nThe primary use of Recognize Anything is research on fundamental image recognition models.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence." ]
86276e832abd816a0dc6e0504012285a2da2f018
# Dataset Card for Scientific Figures, Captions, and Context A novel vision-language dataset of scientific figures taken directly from research papers. We scraped approximately ~150k papers, with about ~690k figures total. We extracted each figure's caption and label from the paper. In addition, we searched through each paper to find references of each figure and included the surrounding text as 'context' for this figure. All figures were taken from arXiv research papers. <figure> <img width="500" src="example1.png"> <figcaption>Figure 5: Comparisons between our multifidelity learning paradigm and single low-fidelity (all GPT-3.5) annotation on four domain-specific tasks given the same total 1000 annotation budget. Note that the samples for all GPT-3.5 are drawn based on the uncertainty score.</figcaption> </figure> <figure> <img width="500" src="example2.png"> <figcaption>Figure 3: Problem representation visualization by T- SNE. Our model with A&D improves the problem rep- resentation learning, which groups analogical problems close and separates non-analogical problems.</figurecaption> </figure> ### Usage The `merged.json` file is a mapping between the figure's filename as stored in the repository and its caption, label, and context. To use, you must extract the parts located under dataset/figures/ and keep the raw images in the same directory so that they match the image_filename fields. The images are named in the format ```<paper id>-<figure name>``` where paper id is the id given by arXiv and figure name is the name of the figure as given in the raw format of each paper. # Contributors Yousef Gomaa (@yousefg-codes) and Mohamed Awadalla (@mawadalla) ## Dataset Description - **Paper:** coming soon ### Dataset Summary This dataset includes ~690,000 figures from ~150,000 scientific papers taken from arXiv papers. Each object in the json file is a single research paper with a list of figures each with their caption and surrounding context. | Category | Count | |:-----------|--------:| | Figure | 690883 | | Paper | 152504 | ### Data Instances An example of an object in the `merged.json` file: ```json { [ { 'image_filename': 'dataset/figures/example.png' (or .eps or .pdf or other type), 'label': 'fig_example', 'caption': 'an example caption for this figure', 'context': ['example context where this figure was referenced', 'up to 600 characters'] }, ... ] } ``` ## Dataset Creation We utilized the bulk access of arXiv's papers. ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Citation Information coming soon
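A minimal sketch, assuming the layout described above, of walking `merged.json` and pairing each figure's image file with its caption and context; the exact top-level nesting (papers as lists of figure records) is an assumption based on the example object.

```python
import json
import os

# merged.json maps each figure's image filename to its caption, label, and context.
with open("merged.json") as f:
    papers = json.load(f)

for paper in papers:                        # assumed: one entry per research paper
    for fig in paper:                       # assumed: each paper is a list of figure records
        image_path = fig["image_filename"]  # e.g. dataset/figures/<paper id>-<figure name>
        if os.path.exists(image_path):
            print(image_path, "->", fig["caption"][:80], f"({len(fig['context'])} context snippets)")
```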
mawadalla/scientific-figures-captions-context
[ "task_categories:visual-question-answering", "task_categories:document-question-answering", "size_categories:100K<n<1M", "language:en", "region:us" ]
2023-11-26T08:50:40+00:00
{"language": ["en"], "size_categories": ["100K<n<1M"], "task_categories": ["visual-question-answering", "document-question-answering"], "pretty_name": "Scientific Figures, Captions and Context", "configs": [{"config_name": "Data", "data_files": "merged.json"}]}
2023-11-27T21:47:52+00:00
[]
[ "en" ]
TAGS #task_categories-visual-question-answering #task_categories-document-question-answering #size_categories-100K<n<1M #language-English #region-us
Dataset Card for Scientific Figures, Captions, and Context ========================================================== A novel vision-language dataset of scientific figures taken directly from research papers. We scraped approximately ~150k papers, with about ~690k figures total. We extracted each figure's caption and label from the paper. In addition, we searched through each paper to find references of each figure and included the surrounding text as 'context' for this figure. All figures were taken from arXiv research papers. ![](URL) Figure 5: Comparisons between our multifidelity learning paradigm and single low-fidelity (all GPT-3.5) annotation on four domain-specific tasks given the same total 1000 annotation budget. Note that the samples for all GPT-3.5 are drawn based on the uncertainty score. ![](URL) Figure 3: Problem representation visualization by T- SNE. Our model with A&D improves the problem rep- resentation learning, which groups analogical problems close and separates non-analogical problems. ### Usage The 'URL' file is a mapping between the figure's filename as stored in the repository and its caption, label, and context. To use, you must extract the parts located under dataset/figures/ and keep the raw images in the same directory so that they match the image\_filename fields. The images are named in the format where paper id is the id given by arXiv and figure name is the name of the figure as given in the raw format of each paper. Contributors ============ Yousef Gomaa (@yousefg-codes) and Mohamed Awadalla (@mawadalla) Dataset Description ------------------- * Paper: coming soon ### Dataset Summary This dataset includes ~690,000 figures from ~150,000 scientific papers taken from arXiv papers. Each object in the json file is a single research paper with a list of figures each with their caption and surrounding context. ### Data Instances An example of an object in the 'URL' file: Dataset Creation ---------------- We utilized the bulk access of arXiv's papers. ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators coming soon
[ "### Usage\n\n\nThe 'URL' file is a mapping between the figure's filename as stored in the repository and its caption, label, and context.\nTo use, you must extract the parts located under dataset/figures/ and keep the raw images in the same directory so that they match the image\\_filename fields.\nThe images are named in the format where paper id is the id given by arXiv and figure name is the name of the figure as given in the raw format of each paper.\n\n\nContributors\n============\n\n\nYousef Gomaa (@yousefg-codes) and Mohamed Awadalla (@mawadalla)\n\n\nDataset Description\n-------------------\n\n\n* Paper: coming soon", "### Dataset Summary\n\n\nThis dataset includes ~690,000 figures from ~150,000 scientific papers taken from arXiv papers. Each object in the json file is a single research paper with a list of figures each with their caption and surrounding context.", "### Data Instances\n\n\nAn example of an object in the 'URL' file:\n\n\nDataset Creation\n----------------\n\n\nWe utilized the bulk access of arXiv's papers.", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\ncoming soon" ]
[ "TAGS\n#task_categories-visual-question-answering #task_categories-document-question-answering #size_categories-100K<n<1M #language-English #region-us \n", "### Usage\n\n\nThe 'URL' file is a mapping between the figure's filename as stored in the repository and its caption, label, and context.\nTo use, you must extract the parts located under dataset/figures/ and keep the raw images in the same directory so that they match the image\\_filename fields.\nThe images are named in the format where paper id is the id given by arXiv and figure name is the name of the figure as given in the raw format of each paper.\n\n\nContributors\n============\n\n\nYousef Gomaa (@yousefg-codes) and Mohamed Awadalla (@mawadalla)\n\n\nDataset Description\n-------------------\n\n\n* Paper: coming soon", "### Dataset Summary\n\n\nThis dataset includes ~690,000 figures from ~150,000 scientific papers taken from arXiv papers. Each object in the json file is a single research paper with a list of figures each with their caption and surrounding context.", "### Data Instances\n\n\nAn example of an object in the 'URL' file:\n\n\nDataset Creation\n----------------\n\n\nWe utilized the bulk access of arXiv's papers.", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\ncoming soon" ]
[ 51, 157, 57, 40, 7, 4, 10, 10, 5, 5, 9, 18, 7, 8, 14, 8 ]
[ "passage: TAGS\n#task_categories-visual-question-answering #task_categories-document-question-answering #size_categories-100K<n<1M #language-English #region-us \n### Usage\n\n\nThe 'URL' file is a mapping between the figure's filename as stored in the repository and its caption, label, and context.\nTo use, you must extract the parts located under dataset/figures/ and keep the raw images in the same directory so that they match the image\\_filename fields.\nThe images are named in the format where paper id is the id given by arXiv and figure name is the name of the figure as given in the raw format of each paper.\n\n\nContributors\n============\n\n\nYousef Gomaa (@yousefg-codes) and Mohamed Awadalla (@mawadalla)\n\n\nDataset Description\n-------------------\n\n\n* Paper: coming soon### Dataset Summary\n\n\nThis dataset includes ~690,000 figures from ~150,000 scientific papers taken from arXiv papers. Each object in the json file is a single research paper with a list of figures each with their caption and surrounding context.### Data Instances\n\n\nAn example of an object in the 'URL' file:\n\n\nDataset Creation\n----------------\n\n\nWe utilized the bulk access of arXiv's papers.### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------### Social Impact of Dataset### Discussion of Biases### Other Known Limitations\n\n\nAdditional Information\n----------------------### Dataset Curators\n\n\ncoming soon" ]
b382a58b0c17cc18aa4b985db340ac2a92f6b12f
# Dataset Card for Evaluation run of 01-ai/Yi-34B-Chat ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/01-ai/Yi-34B-Chat - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [01-ai/Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_01-ai__Yi-34B-Chat", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-05T03:47:25.491369](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B-Chat/blob/main/results_2023-12-05T03-47-25.491369.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks; you can find each one in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.7393930299846158, "acc_stderr": 0.028807135333088364, "acc_norm": 0.7489434623723922, "acc_norm_stderr": 0.02935457295982731, "mc1": 0.3843329253365973, "mc1_stderr": 0.017028707301245203, "mc2": 0.5536831362008046, "mc2_stderr": 0.015524186394858242 }, "harness|arc:challenge|25": { "acc": 0.6373720136518771, "acc_stderr": 0.014049106564955012, "acc_norm": 0.6544368600682594, "acc_norm_stderr": 0.013896938461145678 }, "harness|hellaswag|10": { "acc": 0.6536546504680343, "acc_stderr": 0.004748324319714274, "acc_norm": 0.8415654252141008, "acc_norm_stderr": 0.003644017383711605 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.45, "acc_stderr": 0.05, "acc_norm": 0.45, "acc_norm_stderr": 0.05 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.7111111111111111, "acc_stderr": 0.03915450630414251, "acc_norm": 0.7111111111111111, "acc_norm_stderr": 0.03915450630414251 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.8552631578947368, "acc_stderr": 0.028631951845930387, "acc_norm": 0.8552631578947368, "acc_norm_stderr": 0.028631951845930387 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.79, "acc_stderr": 0.040936018074033256, "acc_norm": 0.79, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7924528301886793, "acc_stderr": 0.02495991802891127, "acc_norm": 0.7924528301886793, "acc_norm_stderr": 0.02495991802891127 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.8472222222222222, "acc_stderr": 0.030085743248565666, "acc_norm": 0.8472222222222222, "acc_norm_stderr": 0.030085743248565666 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.53, "acc_stderr": 0.05016135580465919, "acc_norm": 0.53, "acc_norm_stderr": 0.05016135580465919 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.59,
"acc_stderr": 0.04943110704237101, "acc_norm": 0.59, "acc_norm_stderr": 0.04943110704237101 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6994219653179191, "acc_stderr": 0.034961014811911786, "acc_norm": 0.6994219653179191, "acc_norm_stderr": 0.034961014811911786 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.46078431372549017, "acc_stderr": 0.049598599663841815, "acc_norm": 0.46078431372549017, "acc_norm_stderr": 0.049598599663841815 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.82, "acc_stderr": 0.03861229196653695, "acc_norm": 0.82, "acc_norm_stderr": 0.03861229196653695 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.7659574468085106, "acc_stderr": 0.02767845257821239, "acc_norm": 0.7659574468085106, "acc_norm_stderr": 0.02767845257821239 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.5526315789473685, "acc_stderr": 0.046774730044911984, "acc_norm": 0.5526315789473685, "acc_norm_stderr": 0.046774730044911984 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.8, "acc_stderr": 0.0333333333333333, "acc_norm": 0.8, "acc_norm_stderr": 0.0333333333333333 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.6349206349206349, "acc_stderr": 0.024796060602699965, "acc_norm": 0.6349206349206349, "acc_norm_stderr": 0.024796060602699965 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.5396825396825397, "acc_stderr": 0.04458029125470973, "acc_norm": 0.5396825396825397, "acc_norm_stderr": 0.04458029125470973 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.56, "acc_stderr": 0.049888765156985884, "acc_norm": 0.56, "acc_norm_stderr": 0.049888765156985884 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.8709677419354839, "acc_stderr": 0.019070889254792767, "acc_norm": 0.8709677419354839, "acc_norm_stderr": 0.019070889254792767 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.6206896551724138, "acc_stderr": 0.03413963805906235, "acc_norm": 0.6206896551724138, "acc_norm_stderr": 0.03413963805906235 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.81, "acc_stderr": 0.03942772444036625, "acc_norm": 0.81, "acc_norm_stderr": 0.03942772444036625 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.8545454545454545, "acc_stderr": 0.027530196355066573, "acc_norm": 0.8545454545454545, "acc_norm_stderr": 0.027530196355066573 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.898989898989899, "acc_stderr": 0.021469735576055343, "acc_norm": 0.898989898989899, "acc_norm_stderr": 0.021469735576055343 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9533678756476683, "acc_stderr": 0.015216761819262585, "acc_norm": 0.9533678756476683, "acc_norm_stderr": 0.015216761819262585 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.7846153846153846, "acc_stderr": 0.020843034557462878, "acc_norm": 0.7846153846153846, "acc_norm_stderr": 0.020843034557462878 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.35555555555555557, "acc_stderr": 0.029185714949857403, "acc_norm": 0.35555555555555557, "acc_norm_stderr": 0.029185714949857403 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.8361344537815126, "acc_stderr": 0.024044054940440488, "acc_norm": 0.8361344537815126, "acc_norm_stderr": 0.024044054940440488 }, 
"harness|hendrycksTest-high_school_physics|5": { "acc": 0.5033112582781457, "acc_stderr": 0.04082393379449654, "acc_norm": 0.5033112582781457, "acc_norm_stderr": 0.04082393379449654 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.908256880733945, "acc_stderr": 0.012376323409137123, "acc_norm": 0.908256880733945, "acc_norm_stderr": 0.012376323409137123 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.6342592592592593, "acc_stderr": 0.03284738857647206, "acc_norm": 0.6342592592592593, "acc_norm_stderr": 0.03284738857647206 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.9068627450980392, "acc_stderr": 0.020397853969426998, "acc_norm": 0.9068627450980392, "acc_norm_stderr": 0.020397853969426998 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.9029535864978903, "acc_stderr": 0.019269323025640255, "acc_norm": 0.9029535864978903, "acc_norm_stderr": 0.019269323025640255 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.8116591928251121, "acc_stderr": 0.026241132996407256, "acc_norm": 0.8116591928251121, "acc_norm_stderr": 0.026241132996407256 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8931297709923665, "acc_stderr": 0.027096548624883733, "acc_norm": 0.8931297709923665, "acc_norm_stderr": 0.027096548624883733 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8925619834710744, "acc_stderr": 0.028268812192540627, "acc_norm": 0.8925619834710744, "acc_norm_stderr": 0.028268812192540627 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8888888888888888, "acc_stderr": 0.03038159675665167, "acc_norm": 0.8888888888888888, "acc_norm_stderr": 0.03038159675665167 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.852760736196319, "acc_stderr": 0.027839915278339657, "acc_norm": 0.852760736196319, "acc_norm_stderr": 0.027839915278339657 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.6160714285714286, "acc_stderr": 0.04616143075028546, "acc_norm": 0.6160714285714286, "acc_norm_stderr": 0.04616143075028546 }, "harness|hendrycksTest-management|5": { "acc": 0.8737864077669902, "acc_stderr": 0.0328818027880863, "acc_norm": 0.8737864077669902, "acc_norm_stderr": 0.0328818027880863 }, "harness|hendrycksTest-marketing|5": { "acc": 0.9145299145299145, "acc_stderr": 0.01831589168562586, "acc_norm": 0.9145299145299145, "acc_norm_stderr": 0.01831589168562586 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.87, "acc_stderr": 0.033799766898963086, "acc_norm": 0.87, "acc_norm_stderr": 0.033799766898963086 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8991060025542784, "acc_stderr": 0.01077047201488672, "acc_norm": 0.8991060025542784, "acc_norm_stderr": 0.01077047201488672 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.8092485549132948, "acc_stderr": 0.021152676966575277, "acc_norm": 0.8092485549132948, "acc_norm_stderr": 0.021152676966575277 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.7016759776536313, "acc_stderr": 0.01530184004512928, "acc_norm": 0.7016759776536313, "acc_norm_stderr": 0.01530184004512928 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.8300653594771242, "acc_stderr": 0.02150538312123137, "acc_norm": 0.8300653594771242, "acc_norm_stderr": 0.02150538312123137 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.8102893890675241, "acc_stderr": 0.02226819625878322, "acc_norm": 0.8102893890675241, "acc_norm_stderr": 0.02226819625878322 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.8703703703703703, "acc_stderr": 0.018689725721062072, "acc_norm": 
0.8703703703703703, "acc_norm_stderr": 0.018689725721062072 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.6170212765957447, "acc_stderr": 0.02899908090480618, "acc_norm": 0.6170212765957447, "acc_norm_stderr": 0.02899908090480618 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.5495436766623207, "acc_stderr": 0.012707390438502348, "acc_norm": 0.5495436766623207, "acc_norm_stderr": 0.012707390438502348 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.7830882352941176, "acc_stderr": 0.025035845227711274, "acc_norm": 0.7830882352941176, "acc_norm_stderr": 0.025035845227711274 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.826797385620915, "acc_stderr": 0.015309329266969138, "acc_norm": 0.826797385620915, "acc_norm_stderr": 0.015309329266969138 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.7272727272727273, "acc_stderr": 0.04265792110940589, "acc_norm": 0.7272727272727273, "acc_norm_stderr": 0.04265792110940589 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.8367346938775511, "acc_stderr": 0.02366169917709861, "acc_norm": 0.8367346938775511, "acc_norm_stderr": 0.02366169917709861 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8855721393034826, "acc_stderr": 0.022509345325101706, "acc_norm": 0.8855721393034826, "acc_norm_stderr": 0.022509345325101706 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.89, "acc_stderr": 0.03144660377352203, "acc_norm": 0.89, "acc_norm_stderr": 0.03144660377352203 }, "harness|hendrycksTest-virology|5": { "acc": 0.5843373493975904, "acc_stderr": 0.03836722176598053, "acc_norm": 0.5843373493975904, "acc_norm_stderr": 0.03836722176598053 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8771929824561403, "acc_stderr": 0.02517298435015577, "acc_norm": 0.8771929824561403, "acc_norm_stderr": 0.02517298435015577 }, "harness|truthfulqa:mc|0": { "mc1": 0.3843329253365973, "mc1_stderr": 0.017028707301245203, "mc2": 0.5536831362008046, "mc2_stderr": 0.015524186394858242 }, "harness|winogrande|5": { "acc": 0.8011049723756906, "acc_stderr": 0.01121862997251531 }, "harness|gsm8k|5": { "acc": 0.3191811978771797, "acc_stderr": 0.012840345676251648 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
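As a brief addition to the autogenerated card above (not part of its original text): any of the per-task configurations listed in this repository's metadata can be loaded the same way as the `harness_winogrande_5` example, and each configuration also exposes a `latest` split that tracks the most recent evaluation run. A hedged sketch, using one configuration name and the "latest" split taken from the metadata below:

```python
from datasets import load_dataset

# Sketch only: the configuration name and the "latest" split both appear in the
# configuration list of this repository's metadata; any other subtask works the same way.
details = load_dataset(
    "open-llm-leaderboard/details_01-ai__Yi-34B-Chat",
    "harness_hendrycksTest_abstract_algebra_5",
    split="latest",
)
print(details[0])  # one evaluated example together with the model's prediction
```

The aggregated scores shown in the card's "Latest results" section come from the additional "results" configuration mentioned in the summary above.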
open-llm-leaderboard/details_01-ai__Yi-34B-Chat
[ "region:us" ]
2023-11-26T08:58:33+00:00
{"pretty_name": "Evaluation run of 01-ai/Yi-34B-Chat", "dataset_summary": "Dataset automatically created during the evaluation run of model [01-ai/Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_01-ai__Yi-34B-Chat\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-05T03:47:25.491369](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B-Chat/blob/main/results_2023-12-05T03-47-25.491369.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.7393930299846158,\n \"acc_stderr\": 0.028807135333088364,\n \"acc_norm\": 0.7489434623723922,\n \"acc_norm_stderr\": 0.02935457295982731,\n \"mc1\": 0.3843329253365973,\n \"mc1_stderr\": 0.017028707301245203,\n \"mc2\": 0.5536831362008046,\n \"mc2_stderr\": 0.015524186394858242\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6373720136518771,\n \"acc_stderr\": 0.014049106564955012,\n \"acc_norm\": 0.6544368600682594,\n \"acc_norm_stderr\": 0.013896938461145678\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6536546504680343,\n \"acc_stderr\": 0.004748324319714274,\n \"acc_norm\": 0.8415654252141008,\n \"acc_norm_stderr\": 0.003644017383711605\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.45,\n \"acc_stderr\": 0.05,\n \"acc_norm\": 0.45,\n \"acc_norm_stderr\": 0.05\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.7111111111111111,\n \"acc_stderr\": 0.03915450630414251,\n \"acc_norm\": 0.7111111111111111,\n \"acc_norm_stderr\": 0.03915450630414251\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.8552631578947368,\n \"acc_stderr\": 0.028631951845930387,\n \"acc_norm\": 0.8552631578947368,\n \"acc_norm_stderr\": 0.028631951845930387\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.79,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.79,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7924528301886793,\n \"acc_stderr\": 0.02495991802891127,\n \"acc_norm\": 0.7924528301886793,\n \"acc_norm_stderr\": 0.02495991802891127\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.8472222222222222,\n \"acc_stderr\": 0.030085743248565666,\n \"acc_norm\": 0.8472222222222222,\n \"acc_norm_stderr\": 0.030085743248565666\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.05016135580465919,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.05016135580465919\n },\n 
\"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.59,\n \"acc_stderr\": 0.04943110704237101,\n \"acc_norm\": 0.59,\n \"acc_norm_stderr\": 0.04943110704237101\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6994219653179191,\n \"acc_stderr\": 0.034961014811911786,\n \"acc_norm\": 0.6994219653179191,\n \"acc_norm_stderr\": 0.034961014811911786\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.46078431372549017,\n \"acc_stderr\": 0.049598599663841815,\n \"acc_norm\": 0.46078431372549017,\n \"acc_norm_stderr\": 0.049598599663841815\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.82,\n \"acc_stderr\": 0.03861229196653695,\n \"acc_norm\": 0.82,\n \"acc_norm_stderr\": 0.03861229196653695\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.7659574468085106,\n \"acc_stderr\": 0.02767845257821239,\n \"acc_norm\": 0.7659574468085106,\n \"acc_norm_stderr\": 0.02767845257821239\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.5526315789473685,\n \"acc_stderr\": 0.046774730044911984,\n \"acc_norm\": 0.5526315789473685,\n \"acc_norm_stderr\": 0.046774730044911984\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.8,\n \"acc_stderr\": 0.0333333333333333,\n \"acc_norm\": 0.8,\n \"acc_norm_stderr\": 0.0333333333333333\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.6349206349206349,\n \"acc_stderr\": 0.024796060602699965,\n \"acc_norm\": 0.6349206349206349,\n \"acc_norm_stderr\": 0.024796060602699965\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.5396825396825397,\n \"acc_stderr\": 0.04458029125470973,\n \"acc_norm\": 0.5396825396825397,\n \"acc_norm_stderr\": 0.04458029125470973\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.56,\n \"acc_stderr\": 0.049888765156985884,\n \"acc_norm\": 0.56,\n \"acc_norm_stderr\": 0.049888765156985884\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.8709677419354839,\n \"acc_stderr\": 0.019070889254792767,\n \"acc_norm\": 0.8709677419354839,\n \"acc_norm_stderr\": 0.019070889254792767\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.6206896551724138,\n \"acc_stderr\": 0.03413963805906235,\n \"acc_norm\": 0.6206896551724138,\n \"acc_norm_stderr\": 0.03413963805906235\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.81,\n \"acc_stderr\": 0.03942772444036625,\n \"acc_norm\": 0.81,\n \"acc_norm_stderr\": 0.03942772444036625\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.8545454545454545,\n \"acc_stderr\": 0.027530196355066573,\n \"acc_norm\": 0.8545454545454545,\n \"acc_norm_stderr\": 0.027530196355066573\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.898989898989899,\n \"acc_stderr\": 0.021469735576055343,\n \"acc_norm\": 0.898989898989899,\n \"acc_norm_stderr\": 0.021469735576055343\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9533678756476683,\n \"acc_stderr\": 0.015216761819262585,\n \"acc_norm\": 0.9533678756476683,\n \"acc_norm_stderr\": 0.015216761819262585\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.7846153846153846,\n \"acc_stderr\": 0.020843034557462878,\n \"acc_norm\": 0.7846153846153846,\n 
\"acc_norm_stderr\": 0.020843034557462878\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.35555555555555557,\n \"acc_stderr\": 0.029185714949857403,\n \"acc_norm\": 0.35555555555555557,\n \"acc_norm_stderr\": 0.029185714949857403\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.8361344537815126,\n \"acc_stderr\": 0.024044054940440488,\n \"acc_norm\": 0.8361344537815126,\n \"acc_norm_stderr\": 0.024044054940440488\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.5033112582781457,\n \"acc_stderr\": 0.04082393379449654,\n \"acc_norm\": 0.5033112582781457,\n \"acc_norm_stderr\": 0.04082393379449654\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.908256880733945,\n \"acc_stderr\": 0.012376323409137123,\n \"acc_norm\": 0.908256880733945,\n \"acc_norm_stderr\": 0.012376323409137123\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.6342592592592593,\n \"acc_stderr\": 0.03284738857647206,\n \"acc_norm\": 0.6342592592592593,\n \"acc_norm_stderr\": 0.03284738857647206\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.9068627450980392,\n \"acc_stderr\": 0.020397853969426998,\n \"acc_norm\": 0.9068627450980392,\n \"acc_norm_stderr\": 0.020397853969426998\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.9029535864978903,\n \"acc_stderr\": 0.019269323025640255,\n \"acc_norm\": 0.9029535864978903,\n \"acc_norm_stderr\": 0.019269323025640255\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.8116591928251121,\n \"acc_stderr\": 0.026241132996407256,\n \"acc_norm\": 0.8116591928251121,\n \"acc_norm_stderr\": 0.026241132996407256\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8931297709923665,\n \"acc_stderr\": 0.027096548624883733,\n \"acc_norm\": 0.8931297709923665,\n \"acc_norm_stderr\": 0.027096548624883733\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8925619834710744,\n \"acc_stderr\": 0.028268812192540627,\n \"acc_norm\": 0.8925619834710744,\n \"acc_norm_stderr\": 0.028268812192540627\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8888888888888888,\n \"acc_stderr\": 0.03038159675665167,\n \"acc_norm\": 0.8888888888888888,\n \"acc_norm_stderr\": 0.03038159675665167\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.852760736196319,\n \"acc_stderr\": 0.027839915278339657,\n \"acc_norm\": 0.852760736196319,\n \"acc_norm_stderr\": 0.027839915278339657\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.6160714285714286,\n \"acc_stderr\": 0.04616143075028546,\n \"acc_norm\": 0.6160714285714286,\n \"acc_norm_stderr\": 0.04616143075028546\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.8737864077669902,\n \"acc_stderr\": 0.0328818027880863,\n \"acc_norm\": 0.8737864077669902,\n \"acc_norm_stderr\": 0.0328818027880863\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.9145299145299145,\n \"acc_stderr\": 0.01831589168562586,\n \"acc_norm\": 0.9145299145299145,\n \"acc_norm_stderr\": 0.01831589168562586\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.87,\n \"acc_stderr\": 0.033799766898963086,\n \"acc_norm\": 0.87,\n \"acc_norm_stderr\": 0.033799766898963086\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8991060025542784,\n \"acc_stderr\": 0.01077047201488672,\n \"acc_norm\": 0.8991060025542784,\n \"acc_norm_stderr\": 0.01077047201488672\n },\n 
\"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.8092485549132948,\n \"acc_stderr\": 0.021152676966575277,\n \"acc_norm\": 0.8092485549132948,\n \"acc_norm_stderr\": 0.021152676966575277\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.7016759776536313,\n \"acc_stderr\": 0.01530184004512928,\n \"acc_norm\": 0.7016759776536313,\n \"acc_norm_stderr\": 0.01530184004512928\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.8300653594771242,\n \"acc_stderr\": 0.02150538312123137,\n \"acc_norm\": 0.8300653594771242,\n \"acc_norm_stderr\": 0.02150538312123137\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.8102893890675241,\n \"acc_stderr\": 0.02226819625878322,\n \"acc_norm\": 0.8102893890675241,\n \"acc_norm_stderr\": 0.02226819625878322\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.8703703703703703,\n \"acc_stderr\": 0.018689725721062072,\n \"acc_norm\": 0.8703703703703703,\n \"acc_norm_stderr\": 0.018689725721062072\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.6170212765957447,\n \"acc_stderr\": 0.02899908090480618,\n \"acc_norm\": 0.6170212765957447,\n \"acc_norm_stderr\": 0.02899908090480618\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.5495436766623207,\n \"acc_stderr\": 0.012707390438502348,\n \"acc_norm\": 0.5495436766623207,\n \"acc_norm_stderr\": 0.012707390438502348\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.7830882352941176,\n \"acc_stderr\": 0.025035845227711274,\n \"acc_norm\": 0.7830882352941176,\n \"acc_norm_stderr\": 0.025035845227711274\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.826797385620915,\n \"acc_stderr\": 0.015309329266969138,\n \"acc_norm\": 0.826797385620915,\n \"acc_norm_stderr\": 0.015309329266969138\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.7272727272727273,\n \"acc_stderr\": 0.04265792110940589,\n \"acc_norm\": 0.7272727272727273,\n \"acc_norm_stderr\": 0.04265792110940589\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.8367346938775511,\n \"acc_stderr\": 0.02366169917709861,\n \"acc_norm\": 0.8367346938775511,\n \"acc_norm_stderr\": 0.02366169917709861\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8855721393034826,\n \"acc_stderr\": 0.022509345325101706,\n \"acc_norm\": 0.8855721393034826,\n \"acc_norm_stderr\": 0.022509345325101706\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.89,\n \"acc_stderr\": 0.03144660377352203,\n \"acc_norm\": 0.89,\n \"acc_norm_stderr\": 0.03144660377352203\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5843373493975904,\n \"acc_stderr\": 0.03836722176598053,\n \"acc_norm\": 0.5843373493975904,\n \"acc_norm_stderr\": 0.03836722176598053\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8771929824561403,\n \"acc_stderr\": 0.02517298435015577,\n \"acc_norm\": 0.8771929824561403,\n \"acc_norm_stderr\": 0.02517298435015577\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.3843329253365973,\n \"mc1_stderr\": 0.017028707301245203,\n \"mc2\": 0.5536831362008046,\n \"mc2_stderr\": 0.015524186394858242\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8011049723756906,\n \"acc_stderr\": 0.01121862997251531\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.3191811978771797,\n \"acc_stderr\": 0.012840345676251648\n }\n}\n```", "repo_url": "https://huggingface.co/01-ai/Yi-34B-Chat", "leaderboard_url": 
"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|arc:challenge|25_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|arc:challenge|25_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|drop|3_2023-11-26T08-55-32.839765.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-26T08-55-32.839765.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|gsm8k|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|gsm8k|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hellaswag|10_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hellaswag|10_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-26T08-55-32.839765.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-26T08-55-32.839765.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-05T03-47-25.491369.parquet", 
"**/details_harness|hendrycksTest-anatomy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-05T03-47-25.491369.parquet", 
"**/details_harness|hendrycksTest-marketing|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-05T03-47-25.491369.parquet", 
"**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-05T03-47-25.491369.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", 
"path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": 
"2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": 
["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-05T03-47-25.491369.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["**/details_harness|winogrande|5_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["**/details_harness|winogrande|5_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-05T03-47-25.491369.parquet"]}]}, 
{"config_name": "results", "data_files": [{"split": "2023_11_26T08_55_32.839765", "path": ["results_2023-11-26T08-55-32.839765.parquet"]}, {"split": "2023_12_05T03_47_25.491369", "path": ["results_2023-12-05T03-47-25.491369.parquet"]}, {"split": "latest", "path": ["results_2023-12-05T03-47-25.491369.parquet"]}]}]}
2023-12-05T03:50:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of 01-ai/Yi-34B-Chat ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model 01-ai/Yi-34B-Chat on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-05T03:47:25.491369 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
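The card above refers to a loading snippet that is not preserved in this dump. A minimal sketch of what that call usually looks like is given below; the repository id `open-llm-leaderboard/details_01-ai__Yi-34B-Chat` is inferred from the model name rather than verified, while the configuration names and the `latest` split come from this record's metadata.

```python
from datasets import load_dataset

# Hypothetical repository id, inferred from the model name in the card;
# adjust it if the details repo is named differently on the Hub.
REPO_ID = "open-llm-leaderboard/details_01-ai__Yi-34B-Chat"

# Load one evaluated task; configuration names such as "harness_winogrande_5"
# are listed in this record's metadata, and the "latest" split always points
# to the most recent evaluation run.
details = load_dataset(REPO_ID, "harness_winogrande_5", split="latest")
print(details)

# Aggregated metrics for all tasks live in the "results" configuration.
results = load_dataset(REPO_ID, "results", split="latest")
print(results[0])
```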
[ "# Dataset Card for Evaluation run of 01-ai/Yi-34B-Chat", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B-Chat on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-05T03:47:25.491369(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of 01-ai/Yi-34B-Chat", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B-Chat on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-05T03:47:25.491369(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 168, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of 01-ai/Yi-34B-Chat## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B-Chat on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-05T03:47:25.491369(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
5282a2936020f892c6d9ab559212b41446e2b2db
Training dataset for Alpaca-LoRA negation of [PEM composition](https://arxiv.org/abs/2306.14870), along with instructions for model evaluation on helpfulness and toxicity. A more concise description can be found in the [Git repo](https://github.com/hkust-nlp/PEM_composition). **Abuse of toxic datasets is dangerous for the AI community, so this repo is gated and users must request approval.**
jinghan23/DatasetofPEMCompostition
[ "license:cc-by-nc-4.0", "arxiv:2306.14870", "region:us" ]
2023-11-26T10:03:37+00:00
{"license": "cc-by-nc-4.0"}
2023-11-26T10:12:50+00:00
[ "2306.14870" ]
[]
TAGS #license-cc-by-nc-4.0 #arxiv-2306.14870 #region-us
Training dataset for Alpaca-LoRA negation of PEM composition. Instructions for model evaluation on helpfulness and toxicity. More concise interpretation can be found at Git repo. Toxic datasets abusement is dangerous for AI community, so this repo is gated and users have to request for approval.
[]
[ "TAGS\n#license-cc-by-nc-4.0 #arxiv-2306.14870 #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#license-cc-by-nc-4.0 #arxiv-2306.14870 #region-us \n" ]
576ad9d00dd764ef1d1fdee59b5fc39dab0a5d31
# German BGB Law Dataset (Bürgerliches Gesetzbuch) ## Dataset Description - **Date of Last Paragraph Update:** April 2023 - **Dataset Guarantee:** The dataset is provided "as is," and there is no guarantee for the correctness or completeness of the data. ### Dataset Summary The BGB Law Dataset contains legal text from the German Civil Code (Bürgerliches Gesetzbuch - BGB). It focuses on the general principles of German civil law, and the dataset is designed for tasks related to legal text analysis. ## Dataset Structure ### Data Instances A typical data point in the dataset comprises a legal paragraph and its corresponding text. For example: ```json { 'paragraph': '§ 1 Beginn der Rechtsfähigkeit', 'text': 'Die Rechtsfähigkeit des Menschen beginnt mit der Vollendung der Geburt.' } ```
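As a rough illustration of how the paragraph/text records described in the card above can be consumed, here is a small sketch using the `datasets` library; the repository id and the `paragraph`/`text` field names are taken from this record, and the `train` split name is an assumption.

```python
from datasets import load_dataset

# Repository id and field names are taken from this record; the "train"
# split name is an assumption.
bgb = load_dataset("nookbe/Buergerliches_Gesetzbuch_BGB", split="train")

# Each row pairs a paragraph heading with its legal text.
for row in bgb.select(range(3)):
    print(row["paragraph"], "->", row["text"][:80])
```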
nookbe/Buergerliches_Gesetzbuch_BGB
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:de", "license:mit", "legal", "region:us" ]
2023-11-26T10:15:25+00:00
{"language": ["de"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "BGB", "tags": ["legal"]}
2023-11-26T11:44:58+00:00
[]
[ "de" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us
# German BGB Law Dataset (Bürgerliches Gesetzbuch) ## Dataset Description - Date of Last Paragraph Update: April 2023 - Dataset Guarantee: The dataset is provided "as is," and there is no guarantee for the correctness or completeness of the data. ### Dataset Summary The BGB Law Dataset contains legal text from the German Civil Code (Bürgerliches Gesetzbuch - BGB). It focuses on the general principles of German civil law, and the dataset is designed for tasks related to legal text analysis. ## Dataset Structure ### Data Instances A typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:
[ "# German BGB Law Dataset (Bürgerliches Gesetzbuch)", "## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.", "### Dataset Summary\n\nThe BGB Law Dataset contains legal text from the German Civil Code (Bürgerliches Gesetzbuch - BGB). It focuses on the general principles of German civil law, and the dataset is designed for tasks related to legal text analysis.", "## Dataset Structure", "### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us \n", "# German BGB Law Dataset (Bürgerliches Gesetzbuch)", "## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.", "### Dataset Summary\n\nThe BGB Law Dataset contains legal text from the German Civil Code (Bürgerliches Gesetzbuch - BGB). It focuses on the general principles of German civil law, and the dataset is designed for tasks related to legal text analysis.", "## Dataset Structure", "### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
[ 40, 15, 47, 60, 6, 28 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us \n# German BGB Law Dataset (Bürgerliches Gesetzbuch)## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.### Dataset Summary\n\nThe BGB Law Dataset contains legal text from the German Civil Code (Bürgerliches Gesetzbuch - BGB). It focuses on the general principles of German civil law, and the dataset is designed for tasks related to legal text analysis.## Dataset Structure### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
6a433bbc30bea2d1053484c4bdfb75673044561a
A dataset containing safe and vulnerable code to fine-tune a llm for defect detection. The data is extracted from the wonderful work in the [CVEFixes](https://github.com/secureIT-project/CVEfixes) repository. Citation: ``` @inproceedings{bhandari2021:cvefixes, title = {{CVEfixes: Automated Collection of Vulnerabilities and Their Fixes from Open-Source Software}}, booktitle = {{Proceedings of the 17th International Conference on Predictive Models and Data Analytics in Software Engineering (PROMISE '21)}}, author = {Bhandari, Guru and Naseer, Amara and Moonen, Leon}, year = {2021}, pages = {10}, publisher = {{ACM}}, doi = {10.1145/3475960.3475985}, copyright = {Open Access}, isbn = {978-1-4503-8680-7}, language = {en} } ```
mcanoglu/defect-detection
[ "license:mit", "region:us" ]
2023-11-26T10:21:37+00:00
{"license": "mit"}
2024-01-18T19:23:19+00:00
[]
[]
TAGS #license-mit #region-us
A dataset containing safe and vulnerable code to fine-tune a llm for defect detection. The data is extracted from the wonderful work in the CVEFixes repository. Citation:
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
1600f263c2b5d25fdbcc329920ad228e550487d4
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This repository contains a machine-translated French version of [SICK](https://huggingface.co/datasets/sick) (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset. The dataset is here formatted in a similar manner (TSV format) as the widely used [XNLI](https://huggingface.co/datasets/xnli) dataset for convenience. The machine translation produced in the present repository should be of pretty decent quality, given the average short length of the sentences in SICK dataset. ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. ## Dataset Structure ### Data Fields - `pair_ID`: Sentence pair ID. - `sentence_A`: Sentence A, also known as premise in other NLI datasets. - `sentence_B`: Sentence B, also known as hypothesis in other NLI datasets. - `entailment_label`: textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION). - `entailment_AB`: Entailment label for the A-B order (A_neutral_B, A_entails_B, or A_contradicts_B). - `entailment_BA`: Entailment label for the B-A order (B_neutral_A, B_entails_A, or B_contradicts_A). - `original_SICK_sentence_A`: The original premise from the English source dataset. - `original_SICK_sentence_B`: The original hypothesis from the English source dataset. - `sentence_A_dataset`: The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL). - `sentence_B_dataset`: The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL). ### Data Splits | name |Entailment|Neutral|Contradiction|Total| |--------|---------:|------:|------------:|------------:| |train | 1274 | 2524 | 641 | 4439 | |validation | 143 | 281 | 71 | 495 | |test | 1404 | 2790 | 712 | 4906 | For the A-B order: | name |A_entails_B|A_neutral_B|A_contradicts_B| |--------|---------:|------:|------------:| |train | 1274 | 2381 | 784 | |validation | 143 | 266 | 86 | |test | 1404 | 2621 | 881 | For the B-A order: | name |B_entails_A|B_neutral_A|B_contradicts_A| |--------|---------:|------:|------------:| |train | 606 | 3072 | 761 | |validation | 84 | 329 | 82 | |test | 610 | 3431 | 865 | ## Dataset Creation The dataset was machine translated from English to French using the latest neural machine translation [opus-mt-tc-big](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-en-fr) model available for French. The translation of the sentences was carried out on November 26th, 2023. 
## Additional Information ### Citation Information **BibTeX:** ````BibTeX @inproceedings{marelli-etal-2014-sick, title = "A {SICK} cure for the evaluation of compositional distributional semantic models", author = "Marelli, Marco and Menini, Stefano and Baroni, Marco and Bentivogli, Luisa and Bernardi, Raffaella and Zamparelli, Roberto", editor = "Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios", booktitle = "Proceedings of the Ninth International Conference on Language Resources and Evaluation ({LREC}'14)", month = may, year = "2014", address = "Reykjavik, Iceland", publisher = "European Language Resources Association (ELRA)", url = "http://www.lrec-conf.org/proceedings/lrec2014/pdf/363_Paper.pdf", pages = "216--223", abstract = "Shared and internationally recognized benchmarks are fundamental for the development of any computational system. We aim to help the research community working on compositional distributional semantic models (CDSMs) by providing SICK (Sentences Involving Compositional Knowldedge), a large size English benchmark tailored for them. SICK consists of about 10,000 English sentence pairs that include many examples of the lexical, syntactic and semantic phenomena that CDSMs are expected to account for, but do not require dealing with other aspects of existing sentential data sets (idiomatic multiword expressions, named entities, telegraphic language) that are not within the scope of CDSMs. By means of crowdsourcing techniques, each pair was annotated for two crucial semantic tasks: relatedness in meaning (with a 5-point rating scale as gold score) and entailment relation between the two elements (with three possible gold labels: entailment, contradiction, and neutral). The SICK data set was used in SemEval-2014 Task 1, and it freely available for research purposes.", } @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", abstract = "This paper presents OPUS-MT a project that focuses on the development of free resources and tools for machine translation. The current status is a repository of over 1,000 pre-trained neural machine translation models that are ready to be launched in on-line translation services. For this we also provide open source implementations of web applications that can run efficiently on average desktop hardware with a straightforward setup and installation.", } ```` **ACL:** Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. [A SICK cure for the evaluation of compositional distributional semantic models](http://www.lrec-conf.org/proceedings/lrec2014/pdf/363_Paper.pdf). In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA). Jörg Tiedemann and Santhosh Thottingal. 2020. [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61). 
In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation. ### Acknowledgements This translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
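A brief sketch of how the machine-translated French SICK data described in this card might be loaded and inspected follows; the repository id `maximoss/sick-fr-mt` and the field names come from this record, while the split names and the assumption that the TSV files load directly with `load_dataset` are not verified here.

```python
from collections import Counter

from datasets import load_dataset

# Repository id and field names from this record; split names and direct
# loadability of the TSV files are assumptions.
sick_fr = load_dataset("maximoss/sick-fr-mt")
train = sick_fr["train"]

# Inspect one premise/hypothesis pair and its gold label.
example = train[0]
print(example["sentence_A"], "||", example["sentence_B"], "->", example["entailment_label"])

# Label distribution of the training split (ENTAILMENT / NEUTRAL / CONTRADICTION).
print(Counter(train["entailment_label"]))
```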
maximoss/sick-fr-mt
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:multi-input-text-classification", "size_categories:1K<n<10K", "language:fr", "license:cc-by-nc-sa-4.0", "region:us" ]
2023-11-26T10:23:05+00:00
{"language": ["fr"], "license": "cc-by-nc-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"]}
2024-02-04T12:28:57+00:00
[]
[ "fr" ]
TAGS #task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-French #license-cc-by-nc-sa-4.0 #region-us
Dataset Card for Dataset Name ============================= Dataset Description ------------------- * Homepage: * Repository: * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This repository contains a machine-translated French version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset. The dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience. The machine translation produced in the present repository should be of pretty decent quality, given the average short length of the sentences in SICK dataset. ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. Dataset Structure ----------------- ### Data Fields * 'pair\_ID': Sentence pair ID. * 'sentence\_A': Sentence A, also known as premise in other NLI datasets. * 'sentence\_B': Sentence B, also known as hypothesis in other NLI datasets. * 'entailment\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION). * 'entailment\_AB': Entailment label for the A-B order (A\_neutral\_B, A\_entails\_B, or A\_contradicts\_B). * 'entailment\_BA': Entailment label for the B-A order (B\_neutral\_A, B\_entails\_A, or B\_contradicts\_A). * 'original\_SICK\_sentence\_A': The original premise from the English source dataset. * 'original\_SICK\_sentence\_B': The original hypothesis from the English source dataset. * 'sentence\_A\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL). * 'sentence\_B\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL). ### Data Splits For the A-B order: For the B-A order: Dataset Creation ---------------- The dataset was machine translated from English to French using the latest neural machine translation opus-mt-tc-big model available for French. The translation of the sentences was carried out on November 26th, 2023. Additional Information ---------------------- BibTeX: ' ACL: Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA). Jörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation. ### Acknowledgements This translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France.
[ "### Dataset Summary\n\n\nThis repository contains a machine-translated French version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\n\n\nThe machine translation produced in the present repository should be of pretty decent quality, given the average short length of the sentences in SICK dataset.", "### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\n* 'pair\\_ID': Sentence pair ID.\n* 'sentence\\_A': Sentence A, also known as premise in other NLI datasets.\n* 'sentence\\_B': Sentence B, also known as hypothesis in other NLI datasets.\n* 'entailment\\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION).\n* 'entailment\\_AB': Entailment label for the A-B order (A\\_neutral\\_B, A\\_entails\\_B, or A\\_contradicts\\_B).\n* 'entailment\\_BA': Entailment label for the B-A order (B\\_neutral\\_A, B\\_entails\\_A, or B\\_contradicts\\_A).\n* 'original\\_SICK\\_sentence\\_A': The original premise from the English source dataset.\n* 'original\\_SICK\\_sentence\\_B': The original hypothesis from the English source dataset.\n* 'sentence\\_A\\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL).\n* 'sentence\\_B\\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL).", "### Data Splits\n\n\n\nFor the A-B order:\n\n\n\nFor the B-A order:\n\n\n\nDataset Creation\n----------------\n\n\nThe dataset was machine translated from English to French using the latest neural machine translation opus-mt-tc-big model available for French.\nThe translation of the sentences was carried out on November 26th, 2023.\n\n\nAdditional Information\n----------------------\n\n\nBibTeX:\n\n\n'\n\n\nACL:\n\n\nMarco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA).\n\n\nJörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation.", "### Acknowledgements\n\n\nThis translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France." ]
[ "TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-French #license-cc-by-nc-sa-4.0 #region-us \n", "### Dataset Summary\n\n\nThis repository contains a machine-translated French version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\n\n\nThe machine translation produced in the present repository should be of pretty decent quality, given the average short length of the sentences in SICK dataset.", "### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\n* 'pair\\_ID': Sentence pair ID.\n* 'sentence\\_A': Sentence A, also known as premise in other NLI datasets.\n* 'sentence\\_B': Sentence B, also known as hypothesis in other NLI datasets.\n* 'entailment\\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION).\n* 'entailment\\_AB': Entailment label for the A-B order (A\\_neutral\\_B, A\\_entails\\_B, or A\\_contradicts\\_B).\n* 'entailment\\_BA': Entailment label for the B-A order (B\\_neutral\\_A, B\\_entails\\_A, or B\\_contradicts\\_A).\n* 'original\\_SICK\\_sentence\\_A': The original premise from the English source dataset.\n* 'original\\_SICK\\_sentence\\_B': The original hypothesis from the English source dataset.\n* 'sentence\\_A\\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL).\n* 'sentence\\_B\\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL).", "### Data Splits\n\n\n\nFor the A-B order:\n\n\n\nFor the B-A order:\n\n\n\nDataset Creation\n----------------\n\n\nThe dataset was machine translated from English to French using the latest neural machine translation opus-mt-tc-big model available for French.\nThe translation of the sentences was carried out on November 26th, 2023.\n\n\nAdditional Information\n----------------------\n\n\nBibTeX:\n\n\n'\n\n\nACL:\n\n\nMarco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA).\n\n\nJörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. 
European Association for Machine Translation.", "### Acknowledgements\n\n\nThis translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France." ]
[ 76, 199, 61, 324, 248, 85 ]
[ "passage: TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-French #license-cc-by-nc-sa-4.0 #region-us \n### Dataset Summary\n\n\nThis repository contains a machine-translated French version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\n\n\nThe machine translation produced in the present repository should be of pretty decent quality, given the average short length of the sentences in SICK dataset.### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------" ]
13f659af83fe0caa5211bf09ee724cb969090b66
# Dataset Card for "hh-rlhf-harmyes" Original Dataset: https://huggingface.co/datasets/Anthropic/hh-rlhf [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
maywell/hh-rlhf-harmyes
[ "region:us" ]
2023-11-26T11:37:39+00:00
{"dataset_info": {"features": [{"name": "rejected", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19681345, "num_examples": 42537}], "download_size": 0, "dataset_size": 19681345}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T22:41:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hh-rlhf-harmyes" Original Dataset: URL More Information needed
[ "# Dataset Card for \"hh-rlhf-harmyes\"\n\nOriginal Dataset: URL\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hh-rlhf-harmyes\"\n\nOriginal Dataset: URL\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hh-rlhf-harmyes\"\n\nOriginal Dataset: URL\n\nMore Information needed" ]
483baff3c76ef955c6fc985a989c5cad5d813f8f
# German HGB Law Dataset (Handelsgesetzbuch) ## Dataset Description - **Date of Last Paragraph Update:** April 2023 - **Dataset Guarantee:** The dataset is provided "as is," and there is no guarantee for the correctness or completeness of the data. ### Dataset Summary The HGB Law Dataset contains legal text from the German Commercial Code (Handelsgesetzbuch - HGB). It focuses on the general principles of German commercial law, and the dataset is designed for tasks related to legal text analysis. ## Dataset Structure ### Data Instances A typical data point in the dataset comprises a legal paragraph and its corresponding text. For example: ```json { 'paragraph': '§ 1 Handelsstand', 'text': 'Wer ein Handelsgewerbe betreibt, ist Kaufmann.' } ```
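For the HGB card above, a comparable loading sketch is shown below; the repository id and the `paragraph`/`text` field names come from this record, while the `train` split name is an assumption.

```python
from datasets import load_dataset

# Repository id and the paragraph/text field names come from this record;
# the "train" split name is an assumption.
hgb = load_dataset("nookbe/Handelsgesetzbuch_HGB", split="train")

# Simple keyword filter over the statute text, e.g. every paragraph that
# mentions "Kaufmann" (merchant).
hits = hgb.filter(lambda row: "Kaufmann" in row["text"])
print(f"{len(hits)} of {len(hgb)} paragraphs mention 'Kaufmann'")
print(hits[0]["paragraph"])
```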
nookbe/Handelsgesetzbuch_HGB
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:de", "license:mit", "legal", "region:us" ]
2023-11-26T11:48:25+00:00
{"language": ["de"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "HGB", "tags": ["legal"]}
2023-11-26T11:53:18+00:00
[]
[ "de" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us
license: mit task_categories: - text-classification language: - de tags: - legal pretty_name: HGB size_categories: - 1K<n<10K --- # German HGB Law Dataset (Handelsgesetzbuch) ## Dataset Description - Date of Last Paragraph Update: April 2023 - Dataset Guarantee: The dataset is provided "as is," and there is no guarantee for the correctness or completeness of the data. ### Dataset Summary The HGB Law Dataset contains legal text from the German Commercial Code (Handelsgesetzbuch - HGB). It focuses on the general principles of German commercial law, and the dataset is designed for tasks related to legal text analysis. ## Dataset Structure ### Data Instances A typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:
[ "# German HGB Law Dataset (Handelsgesetzbuch)", "## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.", "### Dataset Summary\n\nThe HGB Law Dataset contains legal text from the German Commercial Code (Handelsgesetzbuch - HGB). It focuses on the general principles of German commercial law, and the dataset is designed for tasks related to legal text analysis.", "## Dataset Structure", "### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us \n", "# German HGB Law Dataset (Handelsgesetzbuch)", "## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.", "### Dataset Summary\n\nThe HGB Law Dataset contains legal text from the German Commercial Code (Handelsgesetzbuch - HGB). It focuses on the general principles of German commercial law, and the dataset is designed for tasks related to legal text analysis.", "## Dataset Structure", "### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
[ 40, 13, 47, 60, 6, 28 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-German #license-mit #legal #region-us \n# German HGB Law Dataset (Handelsgesetzbuch)## Dataset Description\n\n- Date of Last Paragraph Update: April 2023\n- Dataset Guarantee: The dataset is provided \"as is,\" and there is no guarantee for the correctness or completeness of the data.### Dataset Summary\n\nThe HGB Law Dataset contains legal text from the German Commercial Code (Handelsgesetzbuch - HGB). It focuses on the general principles of German commercial law, and the dataset is designed for tasks related to legal text analysis.## Dataset Structure### Data Instances\n\nA typical data point in the dataset comprises a legal paragraph and its corresponding text. For example:" ]
025711a56ed995922ae134a3e4fcab866141edf9
The PIPPA and Bluemoon datasets were combined, with entries alternating between the two sources.
ricecake/PIPPA_and_Bluemoon
[ "region:us" ]
2023-11-26T12:19:51+00:00
{}
2023-11-26T12:47:45+00:00
[]
[]
TAGS #region-us
PIPPA and Bluemoon datasets were combined and alternated between the two.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
fc0af35e70b23cd4f01e7154e7680728b1aab62a
# Dataset Card for "meu_site_juridico_perguntas_e_respostas" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celsowm/meu_site_juridico_perguntas_e_respostas
[ "region:us" ]
2023-11-26T12:41:17+00:00
{"dataset_info": {"features": [{"name": "titulo", "dtype": "string"}, {"name": "conteudo", "dtype": "string"}, {"name": "link", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1937556, "num_examples": 1596}], "download_size": 1064237, "dataset_size": 1937556}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T12:49:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "meu_site_juridico_perguntas_e_respostas" More Information needed
[ "# Dataset Card for \"meu_site_juridico_perguntas_e_respostas\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"meu_site_juridico_perguntas_e_respostas\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"meu_site_juridico_perguntas_e_respostas\"\n\nMore Information needed" ]
8d425954bf27aeb5c873710c06280e64fa78a290
# **Math QA** This dataset is processed from [camel-ai/math](https://huggingface.co/datasets/camel-ai/math). ## CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society - **Github:** https://github.com/lightaime/camel - **Website:** https://www.camel-ai.org/ - **Arxiv Paper:** https://arxiv.org/abs/2303.17760 ## Dataset Summary The Math dataset is composed of 50K problem-solution pairs obtained using GPT-4. The problem-solution pairs were generated from 25 math topics, with 25 subtopics for each topic and 80 problems for each (topic, subtopic) pair. ## Data Fields **The data fields for files in `math50k.zip` are as follows:** * `topic`: math topic * `sub_topic`: math subtopic belonging to the topic * `question`: refers to the problem the assistant is asked to solve. * `answer`: refers to the solution provided by the assistant. ### Citation ``` @misc{li2023camel, title={CAMEL: Communicative Agents for "Mind" Exploration of Large Scale Language Model Society}, author={Guohao Li and Hasan Abed Al Kader Hammoud and Hani Itani and Dmitrii Khizbullin and Bernard Ghanem}, year={2023}, eprint={2303.17760}, archivePrefix={arXiv}, primaryClass={cs.AI} } ``` ## Disclaimer: This data was synthetically generated by GPT-4 and might contain incorrect information. The dataset is intended for research purposes only. --- license: cc-by-nc-4.0 ---
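Since the Math QA card above is tagged for instruction fine-tuning, a small sketch of turning its topic/sub_topic/question/answer fields into prompt–completion pairs may help; the repository id and split names come from this record's metadata, and the prompt template itself is only an illustrative design choice.

```python
from datasets import load_dataset

# Repository id and split names come from this record's metadata; the prompt
# template below is an illustrative design choice, not prescribed by the card.
math_qa = load_dataset("rvv-karma/Math-QA", split="train")

def to_prompt(example):
    # Format one problem-solution pair as a prompt/completion record.
    prompt = (
        f"Topic: {example['topic']} / {example['sub_topic']}\n"
        f"Problem: {example['question']}\n"
        f"Solution:"
    )
    return {"prompt": prompt, "completion": " " + example["answer"]}

pairs = math_qa.map(to_prompt, remove_columns=math_qa.column_names)
print(pairs[0]["prompt"])
```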
rvv-karma/Math-QA
[ "task_categories:text-generation", "language:en", "license:cc-by-nc-4.0", "instruction-finetuning", "question-answering", "arxiv:2303.17760", "region:us" ]
2023-11-26T14:00:10+00:00
{"language": ["en"], "license": "cc-by-nc-4.0", "task_categories": ["text-generation"], "pretty_name": "Math QA", "dataset_info": {"features": [{"name": "topic", "dtype": "string"}, {"name": "sub_topic", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51443837.9, "num_examples": 35000}, {"name": "val", "num_bytes": 7349119.7, "num_examples": 5000}, {"name": "test", "num_bytes": 14698239.4, "num_examples": 10000}], "download_size": 26881360, "dataset_size": 73491197}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}, {"split": "test", "path": "data/test-*"}]}], "tags": ["instruction-finetuning", "question-answering"]}
2023-11-26T14:10:01+00:00
[ "2303.17760" ]
[ "en" ]
TAGS #task_categories-text-generation #language-English #license-cc-by-nc-4.0 #instruction-finetuning #question-answering #arxiv-2303.17760 #region-us
# Math QA This dataset is processed from camel-ai/math. ## CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society - Github: URL - Website: URL - Arxiv Paper: URL ## Dataset Summary Math dataset is composed of 50K problem-solution pairs obtained using GPT-4. The dataset problem-solutions pairs generating from 25 math topics, 25 subtopics for each topic and 80 problems for each "topic,subtopic" pairs. ## Data Fields The data fields for files in 'URL' are as follows: * 'topic': math topic * 'sub_topic': math subtopic belonging to topic * 'question': refers to the problem the assistant is asked to solve. * 'answer': refers to the solution provided by the assistant. ## Disclaimer: This data was synthetically generated by GPT4 and might contain incorrect information. The dataset is there only for research purposes. --- license: cc-by-nc-4.0 ---
[ "# Math QA\n\nThis dataset is processed from camel-ai/math.", "## CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\n- Github: URL\n- Website: URL\n- Arxiv Paper: URL", "## Dataset Summary\n\nMath dataset is composed of 50K problem-solution pairs obtained using GPT-4. The dataset problem-solutions pairs generating from 25 math topics, 25 subtopics for each topic and 80 problems for each \"topic,subtopic\" pairs.", "## Data Fields\n\nThe data fields for files in 'URL' are as follows:\n\n* 'topic': math topic\n* 'sub_topic': math subtopic belonging to topic\n* 'question': refers to the problem the assistant is asked to solve.\n* 'answer': refers to the solution provided by the assistant.", "## Disclaimer:\n\nThis data was synthetically generated by GPT4 and might contain incorrect information. The dataset is there only for research purposes.\n\n---\nlicense: cc-by-nc-4.0\n---" ]
[ "TAGS\n#task_categories-text-generation #language-English #license-cc-by-nc-4.0 #instruction-finetuning #question-answering #arxiv-2303.17760 #region-us \n", "# Math QA\n\nThis dataset is processed from camel-ai/math.", "## CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\n- Github: URL\n- Website: URL\n- Arxiv Paper: URL", "## Dataset Summary\n\nMath dataset is composed of 50K problem-solution pairs obtained using GPT-4. The dataset problem-solutions pairs generating from 25 math topics, 25 subtopics for each topic and 80 problems for each \"topic,subtopic\" pairs.", "## Data Fields\n\nThe data fields for files in 'URL' are as follows:\n\n* 'topic': math topic\n* 'sub_topic': math subtopic belonging to topic\n* 'question': refers to the problem the assistant is asked to solve.\n* 'answer': refers to the solution provided by the assistant.", "## Disclaimer:\n\nThis data was synthetically generated by GPT4 and might contain incorrect information. The dataset is there only for research purposes.\n\n---\nlicense: cc-by-nc-4.0\n---" ]
[ 52, 18, 39, 64, 74, 44 ]
[ "passage: TAGS\n#task_categories-text-generation #language-English #license-cc-by-nc-4.0 #instruction-finetuning #question-answering #arxiv-2303.17760 #region-us \n# Math QA\n\nThis dataset is processed from camel-ai/math.## CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\n- Github: URL\n- Website: URL\n- Arxiv Paper: URL## Dataset Summary\n\nMath dataset is composed of 50K problem-solution pairs obtained using GPT-4. The dataset problem-solutions pairs generating from 25 math topics, 25 subtopics for each topic and 80 problems for each \"topic,subtopic\" pairs.## Data Fields\n\nThe data fields for files in 'URL' are as follows:\n\n* 'topic': math topic\n* 'sub_topic': math subtopic belonging to topic\n* 'question': refers to the problem the assistant is asked to solve.\n* 'answer': refers to the solution provided by the assistant.## Disclaimer:\n\nThis data was synthetically generated by GPT4 and might contain incorrect information. The dataset is there only for research purposes.\n\n---\nlicense: cc-by-nc-4.0\n---" ]
bcb339e04b249534dd9c3842f8b238030e44af19
# DarijaBridge Dataset Card ### General Information - **Dataset Name:** DarijaBridge - **Version:** 1.0 - **Creator:** MAD-Community - **Language:** Darija (Moroccan Arabic) and English - **Total Tokens:** 41,845,467 (in 'sentence' column) - **Task:** Machine Translation ### Dataset Summary DarijaBridge is a community-driven bilingual corpus designed for machine translation tasks between Darija (Moroccan Arabic) and English. Created by MAD-Community, it encompasses a wide range of the Moroccan "dialects" and colloquial expressions, reflecting the linguistic diversity of Morocco. The dataset is particularly valuable for developing and fine-tuning leading MT models like NLLB, improving translation accuracy and cultural relevance. ### Intended Use This dataset is intended for use in machine translation research and applications, especially for those focusing on underrepresented languages and dialects like Darija. It's suitable for training models to translate between English and Darija and can be a crucial resource for linguistic studies and fostering cross-cultural communication. ## Data Collection and Preparation ### Data Source The data in DarijaBridge has been contributed by the MAD-Community, comprising native Darija speakers and language experts. Contributions are ongoing, and the dataset is regularly updated with new translations and linguistic input. ### Methodology Data is collected through community contributions, ensuring a diverse representation of dialects and usage. Each sentence in Darija is paired with its English translation, reviewed and corrected by language experts and expert models (like GPT-4) for accuracy. ## Dataset Structure ### Data Fields - `sentence`: Contains the original sentence in Darija. - `translation`: Contains the corresponding English translation of the Darija sentence. - `quality`: Indicates the quality of the text in the sentence column (1 for high quality). - `metadata`: Includes additional information like language, dialect, source, etc. ### Data Splits The dataset is currently not split into standard training, validation, and test sets. Users are encouraged to create splits as per their specific research or application needs. ## Additional Information ### Limitations and Bias As the dataset is community-contributed, there may be variations in translation quality and style. Efforts are made to standardize and review translations, but users should be aware of potential inconsistencies. ### Licensing Information The DarijaBridge dataset is provided under the Apache 2.0 license.
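Because the card leaves train/validation/test splitting to the user, the sketch below shows one way to filter the high-quality rows and carve out an evaluation split with the Hugging Face `datasets` library; the split ratio and seed are illustrative choices, not part of the dataset.

```python
from datasets import load_dataset

# DarijaBridge ships as a single "train" split.
ds = load_dataset("M-A-D/DarijaBridge", split="train")

# Keep only rows flagged as high quality (quality == 1, per the card).
high_quality = ds.filter(lambda example: example["quality"] == 1)

# Create an illustrative 95/5 train/eval split, as the card suggests users do themselves.
splits = high_quality.train_test_split(test_size=0.05, seed=42)
train_ds, eval_ds = splits["train"], splits["test"]

print(train_ds[0]["sentence"], "->", train_ds[0]["translation"])
```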
M-A-D/DarijaBridge
[ "task_categories:translation", "size_categories:1M<n<10M", "language:ar", "language:en", "license:apache-2.0", "region:us" ]
2023-11-26T14:08:17+00:00
{"language": ["ar", "en"], "license": "apache-2.0", "size_categories": ["1M<n<10M"], "task_categories": ["translation"], "pretty_name": "DarijaBridge", "dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "translation", "dtype": "string"}, {"name": "translated", "dtype": "bool"}, {"name": "corrected", "dtype": "bool"}, {"name": "correction", "dtype": "string"}, {"name": "quality", "dtype": "int64"}, {"name": "metadata", "struct": [{"name": "config", "dtype": "string"}, {"name": "dataset", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "template", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 343412514, "num_examples": 1235091}], "download_size": 133902523, "dataset_size": 343412514}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-26T14:17:11+00:00
[]
[ "ar", "en" ]
TAGS #task_categories-translation #size_categories-1M<n<10M #language-Arabic #language-English #license-apache-2.0 #region-us
# DarijaBridge Dataset Card ### General Information - Dataset Name: DarijaBridge - Version: 1.0 - Creator: MAD-Community - Language: Darija (Moroccan Arabic) and English - Total Tokens: 41,845,467 (in 'sentence' column) - Task: Machine Translation ### Dataset Summary DarijaBridge is a community-driven bilingual corpus designed for machine translation tasks between Darija (Moroccan Arabic) and English. Created by MAD-Community, it encompasses a wide range of the Moroccan "dialects" and colloquial expressions, reflecting the linguistic diversity of Morocco. The dataset is particularly valuable for developing and fine-tuning leading MT models like NLLB, improving translation accuracy and cultural relevance. ### Intended Use This dataset is intended for use in machine translation research and applications, especially for those focusing on underrepresented languages and dialects like Darija. It's suitable for training models to translate between English and Darija and can be a crucial resource for linguistic studies and fostering cross-cultural communication. ## Data Collection and Preparation ### Data Source The data in DarijaBridge has been contributed by the MAD-Community, comprising native Darija speakers and language experts. Contributions are ongoing, and the dataset is regularly updated with new translations and linguistic input. ### Methodology Data is collected through community contributions, ensuring a diverse representation of dialects and usage. Each sentence in Darija is paired with its English translation, reviewed and corrected by language experts and expert models (like GPT-4) for accuracy. ## Dataset Structure ### Data Fields - 'sentence': Contains the original sentence in Darija. - 'translation': Contains the corresponding English translation of the Darija sentence. - 'quality': Indicates the quality of the text in the sentence column (1 for high quality). - 'metadata': Includes additional information like language, dialect, source, etc. ### Data Splits The dataset is currently not split into standard training, validation, and test sets. Users are encouraged to create splits as per their specific research or application needs. ## Additional Information ### Limitations and Bias As the dataset is community-contributed, there may be variations in translation quality and style. Efforts are made to standardize and review translations, but users should be aware of potential inconsistencies. ### Licensing Information The DarijaBridge dataset is provided under the Apache 2.0 license.
[ "# DarijaBridge Dataset Card", "### General Information\n\n- Dataset Name: DarijaBridge\n- Version: 1.0\n- Creator: MAD-Community\n- Language: Darija (Moroccan Arabic) and English\n- Total Tokens: 41,845,467 (in 'sentence' column)\n- Task: Machine Translation", "### Dataset Summary\n\nDarijaBridge is a community-driven bilingual corpus designed for machine translation tasks between Darija (Moroccan Arabic) and English. Created by MAD-Community, it encompasses a wide range of the Moroccan \"dialects\" and colloquial expressions, reflecting the linguistic diversity of Morocco. The dataset is particularly valuable for developing and fine-tuning leading MT models like NLLB, improving translation accuracy and cultural relevance.", "### Intended Use\n\nThis dataset is intended for use in machine translation research and applications, especially for those focusing on underrepresented languages and dialects like Darija. It's suitable for training models to translate between English and Darija and can be a crucial resource for linguistic studies and fostering cross-cultural communication.", "## Data Collection and Preparation", "### Data Source\n\nThe data in DarijaBridge has been contributed by the MAD-Community, comprising native Darija speakers and language experts. Contributions are ongoing, and the dataset is regularly updated with new translations and linguistic input.", "### Methodology\n\nData is collected through community contributions, ensuring a diverse representation of dialects and usage. Each sentence in Darija is paired with its English translation, reviewed and corrected by language experts and expert models (like GPT-4) for accuracy.", "## Dataset Structure", "### Data Fields\n\n- 'sentence': Contains the original sentence in Darija.\n- 'translation': Contains the corresponding English translation of the Darija sentence.\n- 'quality': Indicates the quality of the text in the sentence column (1 for high quality).\n- 'metadata': Includes additional information like language, dialect, source, etc.", "### Data Splits\n\nThe dataset is currently not split into standard training, validation, and test sets. Users are encouraged to create splits as per their specific research or application needs.", "## Additional Information", "### Limitations and Bias\n\nAs the dataset is community-contributed, there may be variations in translation quality and style. Efforts are made to standardize and review translations, but users should be aware of potential inconsistencies.", "### Licensing Information\n\nThe DarijaBridge dataset is provided under the Apache 2.0 license." ]
[ "TAGS\n#task_categories-translation #size_categories-1M<n<10M #language-Arabic #language-English #license-apache-2.0 #region-us \n", "# DarijaBridge Dataset Card", "### General Information\n\n- Dataset Name: DarijaBridge\n- Version: 1.0\n- Creator: MAD-Community\n- Language: Darija (Moroccan Arabic) and English\n- Total Tokens: 41,845,467 (in 'sentence' column)\n- Task: Machine Translation", "### Dataset Summary\n\nDarijaBridge is a community-driven bilingual corpus designed for machine translation tasks between Darija (Moroccan Arabic) and English. Created by MAD-Community, it encompasses a wide range of the Moroccan \"dialects\" and colloquial expressions, reflecting the linguistic diversity of Morocco. The dataset is particularly valuable for developing and fine-tuning leading MT models like NLLB, improving translation accuracy and cultural relevance.", "### Intended Use\n\nThis dataset is intended for use in machine translation research and applications, especially for those focusing on underrepresented languages and dialects like Darija. It's suitable for training models to translate between English and Darija and can be a crucial resource for linguistic studies and fostering cross-cultural communication.", "## Data Collection and Preparation", "### Data Source\n\nThe data in DarijaBridge has been contributed by the MAD-Community, comprising native Darija speakers and language experts. Contributions are ongoing, and the dataset is regularly updated with new translations and linguistic input.", "### Methodology\n\nData is collected through community contributions, ensuring a diverse representation of dialects and usage. Each sentence in Darija is paired with its English translation, reviewed and corrected by language experts and expert models (like GPT-4) for accuracy.", "## Dataset Structure", "### Data Fields\n\n- 'sentence': Contains the original sentence in Darija.\n- 'translation': Contains the corresponding English translation of the Darija sentence.\n- 'quality': Indicates the quality of the text in the sentence column (1 for high quality).\n- 'metadata': Includes additional information like language, dialect, source, etc.", "### Data Splits\n\nThe dataset is currently not split into standard training, validation, and test sets. Users are encouraged to create splits as per their specific research or application needs.", "## Additional Information", "### Limitations and Bias\n\nAs the dataset is community-contributed, there may be variations in translation quality and style. Efforts are made to standardize and review translations, but users should be aware of potential inconsistencies.", "### Licensing Information\n\nThe DarijaBridge dataset is provided under the Apache 2.0 license." ]
[ 44, 8, 63, 112, 72, 6, 56, 61, 6, 82, 42, 5, 54, 22 ]
[ "passage: TAGS\n#task_categories-translation #size_categories-1M<n<10M #language-Arabic #language-English #license-apache-2.0 #region-us \n# DarijaBridge Dataset Card### General Information\n\n- Dataset Name: DarijaBridge\n- Version: 1.0\n- Creator: MAD-Community\n- Language: Darija (Moroccan Arabic) and English\n- Total Tokens: 41,845,467 (in 'sentence' column)\n- Task: Machine Translation### Dataset Summary\n\nDarijaBridge is a community-driven bilingual corpus designed for machine translation tasks between Darija (Moroccan Arabic) and English. Created by MAD-Community, it encompasses a wide range of the Moroccan \"dialects\" and colloquial expressions, reflecting the linguistic diversity of Morocco. The dataset is particularly valuable for developing and fine-tuning leading MT models like NLLB, improving translation accuracy and cultural relevance.### Intended Use\n\nThis dataset is intended for use in machine translation research and applications, especially for those focusing on underrepresented languages and dialects like Darija. It's suitable for training models to translate between English and Darija and can be a crucial resource for linguistic studies and fostering cross-cultural communication.## Data Collection and Preparation### Data Source\n\nThe data in DarijaBridge has been contributed by the MAD-Community, comprising native Darija speakers and language experts. Contributions are ongoing, and the dataset is regularly updated with new translations and linguistic input.### Methodology\n\nData is collected through community contributions, ensuring a diverse representation of dialects and usage. Each sentence in Darija is paired with its English translation, reviewed and corrected by language experts and expert models (like GPT-4) for accuracy.## Dataset Structure" ]
2759c729b734e4a2891218386dd25b8ef8e8fcc6
1. **Columns:** - **Non-Technical Role:** Specifies the role being assessed (e.g., Content Developer). - **Assessment Domain:** Denotes the skill or attribute being evaluated (e.g., Adaptability). - **Question:** The actual assessment question. 2. **Content Overview:** - The dataset is focused on assessing various competencies and skills relevant to non-technical roles. - Questions are tailored to evaluate how individuals in these roles handle various situations and challenges. 3. **Example Entries:** - Role: Content Developer, Domain: Adaptability, Question: "How do you adapt your content strategy in response to audience feedback?" - Role: Content Developer, Domain: Adaptability, Question: "How do you handle unexpected changes in project requirements?" - Role: Content Developer, Domain: Adaptability, Question: "Can you provide an example of a time you had to quickly adjust your work priorities?"
Sumsam/QnA_for_Non-Technical_Roles
[ "license:mit", "region:us" ]
2023-11-26T15:03:18+00:00
{"license": "mit"}
2023-11-29T11:33:07+00:00
[]
[]
TAGS #license-mit #region-us
1. Columns: - Non-Technical Role: Specifies the role being assessed (e.g., Content Developer). - Assessment Domain: Denotes the skill or attribute being evaluated (e.g., Adaptability). - Question: The actual assessment question. 2. Content Overview: - The dataset is focused on assessing various competencies and skills relevant to non-technical roles. - Questions are tailored to evaluate how individuals in these roles handle various situations and challenges. 3. Example Entries: - Role: Content Developer, Domain: Adaptability, Question: "How do you adapt your content strategy in response to audience feedback?" - Role: Content Developer, Domain: Adaptability, Question: "How do you handle unexpected changes in project requirements?" - Role: Content Developer, Domain: Adaptability, Question: "Can you provide an example of a time you had to quickly adjust your work priorities?"
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
b4285637564d2566cee0ebd55f2b1fe7f44534c9
# Dataset Card for "Urdu-Instruct-News-Article-Generation" This Dataset is converted from the [original dataset](https://data.mendeley.com/datasets/834vsxnb99/3) by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Article from the given headline. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): Random.choice b.w these 2. The First template is **template_id** 1 and the second template is **template_id** 2 in the dataset. ``` [ "اس دی گی ایک خبر سے متعلق ایک مضمون لکھیں۔ خبر: {}", "یہ خبر جو {} سے تعلق رکھتی ہے، اس پر ایک مضمون لکھیں۔ خبر: {}" ] ``` <b>Translation</b>: ``` 1. Write an article from the given news. news: {} 2. Given the news belonging to category {}, write an article on it. news: {} ``` ## Completion Template (In Urdu) ``` جی ضرور، یہ رہا آپ کی خبر سے متعلق ایک مضمون: {} ``` <b>Translation</b>: ``` Sure, here is the article related to the given news {} ```
AhmadMustafa/Urdu-Instruct-News-Article-Generation
[ "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100K<n<1M", "language:ur", "region:us" ]
2023-11-26T15:29:57+00:00
{"language": ["ur"], "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "text2text-generation"], "pretty_name": "Instruct News Article Generation Urdu", "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "template_id", "dtype": "int64"}, {"name": "template_lang", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 257489376, "num_examples": 100674}, {"name": "test", "num_bytes": 28303926, "num_examples": 11187}], "download_size": 121675545, "dataset_size": 285793302}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-01-24T06:14:51+00:00
[]
[ "ur" ]
TAGS #task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us
# Dataset Card for "Urdu-Instruct-News-Article-Generation" This Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Article from the given headline. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): URL b.w these 2. The First template is template_id 1 and the second template is template_id 2 in the dataset. <b>Translation</b>: ## Completion Template (In Urdu) <b>Translation</b>:
[ "# Dataset Card for \"Urdu-Instruct-News-Article-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Article from the given headline.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The First template is template_id 1 and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ "TAGS\n#task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us \n", "# Dataset Card for \"Urdu-Instruct-News-Article-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Article from the given headline.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The First template is template_id 1 and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ 47, 53, 15, 15, 46, 18 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us \n# Dataset Card for \"Urdu-Instruct-News-Article-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.## Task:\nGenerate the News Article from the given headline.## Split Size:\n- train: 100674\n- test: 11187## Prompt Template (In Urdu):\nURL b.w these 2. The First template is template_id 1 and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
ca9866fc035a4d628052fe9011ee050bfe0dc539
# Dataset Card for "Urdu-Instruct-News-Category-Classification" This Dataset is converted from the [original dataset](https://data.mendeley.com/datasets/834vsxnb99/3) by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Paragraph, and classify the news category from it. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): Random.choice b.w these 2. The first template is **template_id** 1 in the dataset, second template is **template_id** 2 in the dataset. ``` ["""اس پیراگراف (خبروں) کو مندرجہ ذیل درجوں (کیٹگریز) میں سے ایک کیٹگری میں ڈالیں. درجے: - کھیل - تفریح، - کاروبار و معیشت، - سائنس اور ٹیکنالوجی پیراگراف: {}""", """دیے گی خبروں کو ایک لفظ یا درجہ (کیٹگری) میں بیان کریں. درجے: - کھیل - تفریح، - کاروبار و معیشت، - سائنس اور ٹیکنالوجی جملے: {}"""] ``` <b>Translation</b>: ``` [Classify the given paragraph (news article) into one of the following categories: 1. Sports 2. Entertainment 3. Business & Economics 4. Science & Technology paragraph: {}, Classify the given news paragraph into one of the following categories: 1. Sports 2. Entertainment 3. Business & Economics 4. Science & Technology sentences: {}] ``` ## Completion Template (In Urdu) ``` یہ دی گی خبریں {} سے تعلق رکھتی ہیں ``` <b>Translation</b>: ``` The given news belong to the {category} ```
AhmadMustafa/Urdu-Instruct-News-Category-Classification
[ "task_categories:text-classification", "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100K<n<1M", "language:ur", "region:us" ]
2023-11-26T15:35:37+00:00
{"language": ["ur"], "size_categories": ["100K<n<1M"], "task_categories": ["text-classification", "text-generation", "text2text-generation"], "pretty_name": "Instruct News Category Classification Urdu", "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "template_id", "dtype": "int64"}, {"name": "template_lang", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 261557910, "num_examples": 100674}, {"name": "test", "num_bytes": 29048190, "num_examples": 11187}], "download_size": 116629288, "dataset_size": 290606100}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-01-24T06:10:53+00:00
[]
[ "ur" ]
TAGS #task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us
# Dataset Card for "Urdu-Instruct-News-Category-Classification" This Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Paragraph, and classify the news category from it. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): URL b.w these 2. The first template is template_id 1 in the dataset, second template is template_id 2 in the dataset. <b>Translation</b>: ## Completion Template (In Urdu) <b>Translation</b>:
[ "# Dataset Card for \"Urdu-Instruct-News-Category-Classification\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Paragraph, and classify the news category from it.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1 in the dataset, second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ "TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us \n", "# Dataset Card for \"Urdu-Instruct-News-Category-Classification\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Paragraph, and classify the news category from it.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1 in the dataset, second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ 58, 54, 20, 15, 49, 18 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #size_categories-100K<n<1M #language-Urdu #region-us \n# Dataset Card for \"Urdu-Instruct-News-Category-Classification\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.## Task:\nGenerate the News Paragraph, and classify the news category from it.## Split Size:\n- train: 100674\n- test: 11187## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1 in the dataset, second template is template_id 2 in the dataset.\n\n<b>Translation</b>:## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
0a7a575c9687d93ee5449d8f2cba4954fb5945cd
# Dataset Card for "Urdu-Instruct-News-Headline-Generation" This Dataset is converted from the [original dataset](https://data.mendeley.com/datasets/834vsxnb99/3) by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Headline from the given News. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): Random.choice b.w these 2. The first template is **template_id** 1, and the second template is **template_id** 2 in the dataset. ``` ["اس اردو پیراگراف (خبروں) کا عنوان تجویز کریں پیراگراف: {}", "دیے گے خبروں کا عنوان تجویز کریں. جملے: {}" ] ``` <b>Translation</b>: ``` 1. Write a title for the following news article: paragraph: {} 2. Suggest the title of the given the sentences sentences: {} ``` ## Completion Template (In Urdu) ``` جی ضرور، یہ رہا آپ کے پیراگراف کا عنوان: {} ``` <b>Translation</b>: ``` Sure, here is the title of the given article {} ```
AhmadMustafa/Urdu-Instruct-News-Headline-Generation
[ "task_categories:text-generation", "task_categories:summarization", "size_categories:100K<n<1M", "language:ur", "region:us" ]
2023-11-26T15:40:27+00:00
{"language": ["ur"], "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "summarization"], "pretty_name": "Urdu Instruct News Headline Generation", "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "template_id", "dtype": "int64"}, {"name": "template_lang", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 255002720, "num_examples": 100674}, {"name": "test", "num_bytes": 28284699, "num_examples": 11187}], "download_size": 121546528, "dataset_size": 283287419}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-01-24T06:11:35+00:00
[]
[ "ur" ]
TAGS #task_categories-text-generation #task_categories-summarization #size_categories-100K<n<1M #language-Urdu #region-us
# Dataset Card for "Urdu-Instruct-News-Headline-Generation" This Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota. ## Task: Generate the News Headline from the given News. ## Split Size: - train: 100674 - test: 11187 ## Prompt Template (In Urdu): URL b.w these 2. The first template is template_id 1, and the second template is template_id 2 in the dataset. <b>Translation</b>: ## Completion Template (In Urdu) <b>Translation</b>:
[ "# Dataset Card for \"Urdu-Instruct-News-Headline-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Headline from the given News.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1, and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ "TAGS\n#task_categories-text-generation #task_categories-summarization #size_categories-100K<n<1M #language-Urdu #region-us \n", "# Dataset Card for \"Urdu-Instruct-News-Headline-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.", "## Task:\nGenerate the News Headline from the given News.", "## Split Size:\n- train: 100674\n- test: 11187", "## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1, and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:", "## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
[ 44, 54, 15, 15, 47, 18 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-summarization #size_categories-100K<n<1M #language-Urdu #region-us \n# Dataset Card for \"Urdu-Instruct-News-Headline-Generation\"\n\nThis Dataset is converted from the original dataset by Khalid Hussain, Nimra Mughal, Irfan Ali, Saif Hassan, Sher Muhammad Daudpota.## Task:\nGenerate the News Headline from the given News.## Split Size:\n- train: 100674\n- test: 11187## Prompt Template (In Urdu):\nURL b.w these 2. The first template is template_id 1, and the second template is template_id 2 in the dataset.\n\n<b>Translation</b>:## Completion Template (In Urdu)\n\n<b>Translation</b>:" ]
83188e5394182f28d80c194ae94f5205cb70f891
# Dataset Card for Dataset Name This dataset was created by modifying and adapting the [allenai/QASPER: a dataset for question answering on scientific research papers](https://huggingface.co/datasets/allenai/qasper) dataset and **aims to generate Question-Answer Pairs from the Abstract and Introduction of an NLP Paper**. ### Dataset Description - First, we extracted the abstract and introduction of each NLP paper from the QASPER dataset. - We then kept only the question-answer rows whose answers were abstractive (free-form) rather than extractive. - train : 421 rows - validation : 211 rows - test : 320 rows - **Curated by:** [@UNIST-Eunchan](https://huggingface.co/UNIST-Eunchan) ### Dataset Sources This data was created by processing [allenai/qasper](https://huggingface.co/datasets/allenai/qasper) - **Repository:** [allenai/qasper](https://huggingface.co/datasets/allenai/qasper) ## Uses - **Question Generation from Research Papers** - **Long-Document Summarization** - **Question-based Summarization** ## Dataset Creation ### Curation Rationale Long-document summarization datasets, especially those for research-paper summarization, are very scarce. We adapt the existing data to provide research-paper QA pairs specific to NLP. We expect a model trained on this data to generate multiple QA pairs per paper when decoding with sampling. We will release the fine-tuned model in the future.
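As a sketch of how the rows could be framed for a QA-generation model, the snippet below conditions on the abstract and introduction and treats the question-answer pair as the target; the prompt format is an illustrative choice, not the format used by the dataset author.

```python
from datasets import load_dataset

ds = load_dataset("UNIST-Eunchan/NLP-Paper-to-QA-Generation")

def to_generation_example(row: dict) -> dict:
    # One possible framing: condition on abstract + introduction, generate a QA pair.
    source = "Abstract: " + row["abstract"] + "\n\nIntroduction: " + row["introduction"]
    target = "Q: " + row["question"] + "\nA: " + row["answer"]
    return {"source": source, "target": target}

train = ds["train"].map(to_generation_example)
print(train[0]["target"])
```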
UNIST-Eunchan/NLP-Paper-to-QA-Generation
[ "task_categories:summarization", "task_categories:question-answering", "size_categories:n<1K", "language:en", "license:mit", "nlp-research-paper-abstract", "nlp-research-paper", "question-generation", "region:us" ]
2023-11-26T16:25:39+00:00
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["summarization", "question-answering"], "pretty_name": "NLP_Papers_to_Question_Generation", "dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "introduction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1844987, "num_examples": 421}, {"name": "validation", "num_bytes": 949747, "num_examples": 211}, {"name": "test", "num_bytes": 1403003, "num_examples": 320}], "download_size": 2341682, "dataset_size": 4197737}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "tags": ["nlp-research-paper-abstract", "nlp-research-paper", "question-generation"]}
2023-11-26T17:04:13+00:00
[]
[ "en" ]
TAGS #task_categories-summarization #task_categories-question-answering #size_categories-n<1K #language-English #license-mit #nlp-research-paper-abstract #nlp-research-paper #question-generation #region-us
# Dataset Card for Dataset Name This dataset was created by modifying and adapting the allenai/QASPER: a dataset for question answering on scientific research papers dataset and aims to generate Question-Answer Pairs from the Abstract, Introduction of an NLP Paper. ### Dataset Description - First, we extracted the abstract, introduction of each NLP paper from QASPER dataset. - We also extracted only the rows labeled question and answer that had an abstract answer rather than extractive. - train : 421 rows - validation : 211 rows - test : 320 rows - Curated by: @UNIST-Eunchan - ### Dataset Sources This data is made by applying and processing allenai/qasper - Repository: allenai/qasper ## Uses - Question Generation from Research Paper - Long-Document Summarization - Question-based Summarization ## Dataset Creation ### Curation Rationale Long Document Summarization datasets, especially those for Research Paper Summarization, are very limited and scarce. We tweak the existing data to provide domains and QA pairs specific to NLP among Research Papers. We expect to be able to generate multiple QA pairs if we let the model sample through training. We will release the fine-tuned model in the future.
[ "# Dataset Card for Dataset Name\nThis dataset was created by modifying and adapting the allenai/QASPER: a dataset for question answering on scientific research papers dataset \nand aims to generate Question-Answer Pairs from the Abstract, Introduction of an NLP Paper.", "### Dataset Description\n\n\n- First, we extracted the abstract, introduction of each NLP paper from QASPER dataset.\n- We also extracted only the rows labeled question and answer that had an abstract answer rather than extractive. \n- train : 421 rows\n- validation : 211 rows\n- test : 320 rows\n\n- Curated by: @UNIST-Eunchan\n\n-", "### Dataset Sources\n\nThis data is made by applying and processing allenai/qasper\n\n\n\n- Repository: allenai/qasper", "## Uses\n\n- Question Generation from Research Paper\n- Long-Document Summarization \n- Question-based Summarization", "## Dataset Creation", "### Curation Rationale\n\nLong Document Summarization datasets, especially those for Research Paper Summarization, are very limited and scarce. \n\nWe tweak the existing data to provide domains and QA pairs specific to NLP among Research Papers. \n\nWe expect to be able to generate multiple QA pairs if we let the model sample through training. \n\nWe will release the fine-tuned model in the future." ]
[ "TAGS\n#task_categories-summarization #task_categories-question-answering #size_categories-n<1K #language-English #license-mit #nlp-research-paper-abstract #nlp-research-paper #question-generation #region-us \n", "# Dataset Card for Dataset Name\nThis dataset was created by modifying and adapting the allenai/QASPER: a dataset for question answering on scientific research papers dataset \nand aims to generate Question-Answer Pairs from the Abstract, Introduction of an NLP Paper.", "### Dataset Description\n\n\n- First, we extracted the abstract, introduction of each NLP paper from QASPER dataset.\n- We also extracted only the rows labeled question and answer that had an abstract answer rather than extractive. \n- train : 421 rows\n- validation : 211 rows\n- test : 320 rows\n\n- Curated by: @UNIST-Eunchan\n\n-", "### Dataset Sources\n\nThis data is made by applying and processing allenai/qasper\n\n\n\n- Repository: allenai/qasper", "## Uses\n\n- Question Generation from Research Paper\n- Long-Document Summarization \n- Question-based Summarization", "## Dataset Creation", "### Curation Rationale\n\nLong Document Summarization datasets, especially those for Research Paper Summarization, are very limited and scarce. \n\nWe tweak the existing data to provide domains and QA pairs specific to NLP among Research Papers. \n\nWe expect to be able to generate multiple QA pairs if we let the model sample through training. \n\nWe will release the fine-tuned model in the future." ]
[ 73, 64, 89, 31, 23, 5, 91 ]
[ "passage: TAGS\n#task_categories-summarization #task_categories-question-answering #size_categories-n<1K #language-English #license-mit #nlp-research-paper-abstract #nlp-research-paper #question-generation #region-us \n# Dataset Card for Dataset Name\nThis dataset was created by modifying and adapting the allenai/QASPER: a dataset for question answering on scientific research papers dataset \nand aims to generate Question-Answer Pairs from the Abstract, Introduction of an NLP Paper.### Dataset Description\n\n\n- First, we extracted the abstract, introduction of each NLP paper from QASPER dataset.\n- We also extracted only the rows labeled question and answer that had an abstract answer rather than extractive. \n- train : 421 rows\n- validation : 211 rows\n- test : 320 rows\n\n- Curated by: @UNIST-Eunchan\n\n-### Dataset Sources\n\nThis data is made by applying and processing allenai/qasper\n\n\n\n- Repository: allenai/qasper## Uses\n\n- Question Generation from Research Paper\n- Long-Document Summarization \n- Question-based Summarization## Dataset Creation### Curation Rationale\n\nLong Document Summarization datasets, especially those for Research Paper Summarization, are very limited and scarce. \n\nWe tweak the existing data to provide domains and QA pairs specific to NLP among Research Papers. \n\nWe expect to be able to generate multiple QA pairs if we let the model sample through training. \n\nWe will release the fine-tuned model in the future." ]
c97848ff6fc39853d20658b787c58c806583a705
Link to the project: https://www.corpusitaliano.it/it/index.html The PAISÀ corpus is an extensive collection of authentic Italian language texts taken from the Internet. The compilation of contemporary texts was created within the framework of the PAISÀ project (Platform for Learning Italian through Annotated Corpora) with the aim of providing authentic and freely available material for learning Italian. A unique resource for the Italian language has emerged, as it combines several features: - Texts taken from the web (collected in September/October 2010) - Composed entirely of texts available and reusable for free (released under Creative Commons Attribution-ShareAlike and Attribution-NonCommercial-ShareAlike licenses) - Size: approximately 250 million tokens - Fully annotated in CoNLL format (lemmatization, part-of-speech annotation, and syntactic dependencies) - Preprocessed automatically and partially corrected manually (steps performed: retrieval of URL addresses, preprocessing of collected texts, and correction of annotations for the calibration of annotation tools) Although primarily created for learning, the PAISÀ corpus also serves as a valuable resource for various linguistic research activities. The website aims to provide an interface for learners to access the corpus online. There are various access modes designed for different user groups, from pre-set searches to fully flexible search criteria defined through complex queries. The resource is a work in progress and is continuously updated. On the project website, you will also find information about the PAISÀ project, details on how the text collection was formed, and downloadable versions of the entire corpus. The PAISÀ project is funded by the Ministry of Education, Universities, and Research (MIUR) through the Basic Research Investment Fund (FIRB).
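For the copy hosted in this repository, a streaming sketch such as the one below avoids downloading the whole corpus at once; the field names (`text_id`, `url`, `text_content`) follow this repository's metadata, and the CoNLL annotations described above belong to the downloadable corpus rather than to these plain-text fields.

```python
from itertools import islice

from datasets import load_dataset

# Stream the corpus instead of materializing roughly 1 GB of text locally.
paisa = load_dataset("KarlGauss/paisa_corpus", split="train", streaming=True)

for doc in islice(paisa, 3):
    print(doc["text_id"], doc["url"])
    print(doc["text_content"][:200], "...")
```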
KarlGauss/paisa_corpus
[ "language:it", "license:cc-by-nc-sa-3.0", "region:us" ]
2023-11-26T16:25:47+00:00
{"language": ["it"], "license": "cc-by-nc-sa-3.0", "pretty_name": "pais\u00e0", "dataset_info": {"features": [{"name": "text_id", "dtype": "int64"}, {"name": "url", "dtype": "string"}, {"name": "text_content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1166723608.8, "num_examples": 308484}, {"name": "test", "num_bytes": 291680902.2, "num_examples": 77121}], "download_size": 926608734, "dataset_size": 1458404511.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-28T20:47:00+00:00
[]
[ "it" ]
TAGS #language-Italian #license-cc-by-nc-sa-3.0 #region-us
Link to the project: URL The PAISÀ corpus is an extensive collection of authentic Italian language texts taken from the Internet. The compilation of contemporary texts was created within the framework of the PAISÀ project (Platform for Learning Italian through Annotated Corpora) with the aim of providing authentic and freely available material for learning Italian. A unique resource for the Italian language has emerged, as it combines several features: - Texts taken from the web (collected in September/October 2010) - Composed entirely of texts available and reusable for free (protected by Creative Commons licenses attribution - share alike and attribution - share alike - non-commercial) - Size: approximately 250 million tokens - Fully annotated in CoNLL format (lemmatization and annotation of parts of speech (or also here) and syntactic dependencies) - Preprocessed automatically and partially corrected manually (various steps performed: retrieval of URL addresses, preprocessing of collected texts, and correction of annotations for the calibration of annotation tools) Although primarily created for learning, the PAISÀ corpus also serves as a valuable resource for various linguistic research activities. The website aims to provide an interface for learners to access the corpus online. There are various access modes designed for different user groups, from pre-set searches to the possibility of defining search criteria in a completely flexible manner by creating complex queries. It is currently a work in continuous update and progress. In these pages, you will also find information about the PAISÀ project, details on how the text collection was formed, and downloadable versions of the entire corpus. The PAISÀ project is funded by the Ministry of Education, Universities, and Research (MIUR) through the Basic Research Investment Fund (FIRB).
[]
[ "TAGS\n#language-Italian #license-cc-by-nc-sa-3.0 #region-us \n" ]
[ 24 ]
[ "passage: TAGS\n#language-Italian #license-cc-by-nc-sa-3.0 #region-us \n" ]
e078661215ff037467c49631e2fe16b14cf8a66c
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This repository contains a machine-translated Modern Greek version of [SICK](https://huggingface.co/datasets/sick) (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset. The dataset is here formatted in a similar manner (TSV format) as the widely used [XNLI](https://huggingface.co/datasets/xnli) dataset for convenience. It is also compatible with the [French version of SICK](https://huggingface.co/datasets/maximoss/sick-fr-mt) for a combined 3-language NLI task (English, Greek, French) if used together, as they both keep the same column names. ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. ## Dataset Structure ### Data Fields - `pair_ID`: Sentence pair ID. - `sentence_A`: Sentence A, also known as premise in other NLI datasets. - `sentence_B`: Sentence B, also known as hypothesis in other NLI datasets. - `entailment_label`: textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION). - `entailment_AB`: Entailment label for the A-B order (A_neutral_B, A_entails_B, or A_contradicts_B). - `entailment_BA`: Entailment label for the B-A order (B_neutral_A, B_entails_A, or B_contradicts_A). - `original_SICK_sentence_A`: The original premise from the English source dataset. - `original_SICK_sentence_B`: The original hypothesis from the English source dataset. - `sentence_A_dataset`: The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL). - `sentence_B_dataset`: The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL). ### Data Splits | name |Entailment|Neutral|Contradiction|Total| |--------|---------:|------:|------------:|------------:| |train | 1274 | 2524 | 641 | 4439 | |validation | 143 | 281 | 71 | 495 | |test | 1404 | 2790 | 712 | 4906 | For the A-B order: | name |A_entails_B|A_neutral_B|A_contradicts_B| |--------|---------:|------:|------------:| |train | 1274 | 2381 | 784 | |validation | 143 | 266 | 86 | |test | 1404 | 2621 | 881 | For the B-A order: | name |B_entails_A|B_neutral_A|B_contradicts_A| |--------|---------:|------:|------------:| |train | 606 | 3072 | 761 | |validation | 84 | 329 | 82 | |test | 610 | 3431 | 865 | ## Dataset Creation The dataset was machine translated from English to Modern Greek using the latest neural machine translation [opus-mt-tc-big](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-en-el) model available for Modern Greek. The translation of the sentences was carried out on November 26th, 2023. 
## Additional Information ### Citation Information **BibTeX:** ````BibTeX @inproceedings{marelli-etal-2014-sick, title = "A {SICK} cure for the evaluation of compositional distributional semantic models", author = "Marelli, Marco and Menini, Stefano and Baroni, Marco and Bentivogli, Luisa and Bernardi, Raffaella and Zamparelli, Roberto", editor = "Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios", booktitle = "Proceedings of the Ninth International Conference on Language Resources and Evaluation ({LREC}'14)", month = may, year = "2014", address = "Reykjavik, Iceland", publisher = "European Language Resources Association (ELRA)", url = "http://www.lrec-conf.org/proceedings/lrec2014/pdf/363_Paper.pdf", pages = "216--223", abstract = "Shared and internationally recognized benchmarks are fundamental for the development of any computational system. We aim to help the research community working on compositional distributional semantic models (CDSMs) by providing SICK (Sentences Involving Compositional Knowldedge), a large size English benchmark tailored for them. SICK consists of about 10,000 English sentence pairs that include many examples of the lexical, syntactic and semantic phenomena that CDSMs are expected to account for, but do not require dealing with other aspects of existing sentential data sets (idiomatic multiword expressions, named entities, telegraphic language) that are not within the scope of CDSMs. By means of crowdsourcing techniques, each pair was annotated for two crucial semantic tasks: relatedness in meaning (with a 5-point rating scale as gold score) and entailment relation between the two elements (with three possible gold labels: entailment, contradiction, and neutral). The SICK data set was used in SemEval-2014 Task 1, and it freely available for research purposes.", } @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", abstract = "This paper presents OPUS-MT a project that focuses on the development of free resources and tools for machine translation. The current status is a repository of over 1,000 pre-trained neural machine translation models that are ready to be launched in on-line translation services. For this we also provide open source implementations of web applications that can run efficiently on average desktop hardware with a straightforward setup and installation.", } ```` **ACL:** Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. [A SICK cure for the evaluation of compositional distributional semantic models](http://www.lrec-conf.org/proceedings/lrec2014/pdf/363_Paper.pdf). In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA). Jörg Tiedemann and Santhosh Thottingal. 2020. [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61). 
In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation. ### Acknowledgements This translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
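For users who want to fine-tune an NLI classifier, a small sketch of mapping the three-way gold labels to integer ids (XNLI-style) is shown below; the TSV filename is a hypothetical local path, since the exact file layout of the repository is not described in this card.

```python
import pandas as pd

# Hypothetical local copy of one of the repository's TSV files; actual filenames may differ.
df = pd.read_csv("sick_el-gr_train.tsv", sep="\t")

# Map the gold entailment labels to integer ids, following the column names documented above.
label2id = {"ENTAILMENT": 0, "NEUTRAL": 1, "CONTRADICTION": 2}
df["label"] = df["entailment_label"].map(label2id)

print(df[["sentence_A", "sentence_B", "label"]].head())
```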
maximoss/sick_el-gr_mt
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:multi-input-text-classification", "size_categories:1K<n<10K", "language:el", "license:cc-by-nc-sa-4.0", "region:us" ]
2023-11-26T17:05:03+00:00
{"language": ["el"], "license": "cc-by-nc-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"]}
2024-02-10T08:44:58+00:00
[]
[ "el" ]
TAGS #task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-Modern Greek (1453-) #license-cc-by-nc-sa-4.0 #region-us
Dataset Card for Dataset Name ============================= Dataset Description ------------------- * Homepage: * Repository: * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This repository contains a machine-translated Modern Greek version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset. The dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience. It is also compatible with the French version of SICK for a combined 3-language NLI task (English, Greek, French) if used together, as they both keep the same column names. ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. Dataset Structure ----------------- ### Data Fields * 'pair\_ID': Sentence pair ID. * 'sentence\_A': Sentence A, also known as premise in other NLI datasets. * 'sentence\_B': Sentence B, also known as hypothesis in other NLI datasets. * 'entailment\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION). * 'entailment\_AB': Entailment label for the A-B order (A\_neutral\_B, A\_entails\_B, or A\_contradicts\_B). * 'entailment\_BA': Entailment label for the B-A order (B\_neutral\_A, B\_entails\_A, or B\_contradicts\_A). * 'original\_SICK\_sentence\_A': The original premise from the English source dataset. * 'original\_SICK\_sentence\_B': The original hypothesis from the English source dataset. * 'sentence\_A\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL). * 'sentence\_B\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL). ### Data Splits For the A-B order: For the B-A order: Dataset Creation ---------------- The dataset was machine translated from English to Modern Greek using the latest neural machine translation opus-mt-tc-big model available for Modern Greek. The translation of the sentences was carried out on November 26th, 2023. Additional Information ---------------------- BibTeX: ' ACL: Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA). Jörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation. ### Acknowledgements This translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France.
[ "### Dataset Summary\n\n\nThis repository contains a machine-translated Modern Greek version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\nIt is also compatible with the French version of SICK for a combined 3-language NLI task (English, Greek, French) if used together, as they both keep the same column names.", "### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\n* 'pair\\_ID': Sentence pair ID.\n* 'sentence\\_A': Sentence A, also known as premise in other NLI datasets.\n* 'sentence\\_B': Sentence B, also known as hypothesis in other NLI datasets.\n* 'entailment\\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION).\n* 'entailment\\_AB': Entailment label for the A-B order (A\\_neutral\\_B, A\\_entails\\_B, or A\\_contradicts\\_B).\n* 'entailment\\_BA': Entailment label for the B-A order (B\\_neutral\\_A, B\\_entails\\_A, or B\\_contradicts\\_A).\n* 'original\\_SICK\\_sentence\\_A': The original premise from the English source dataset.\n* 'original\\_SICK\\_sentence\\_B': The original hypothesis from the English source dataset.\n* 'sentence\\_A\\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL).\n* 'sentence\\_B\\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL).", "### Data Splits\n\n\n\nFor the A-B order:\n\n\n\nFor the B-A order:\n\n\n\nDataset Creation\n----------------\n\n\nThe dataset was machine translated from English to Modern Greek using the latest neural machine translation opus-mt-tc-big model available for Modern Greek.\nThe translation of the sentences was carried out on November 26th, 2023.\n\n\nAdditional Information\n----------------------\n\n\nBibTeX:\n\n\n'\n\n\nACL:\n\n\nMarco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA).\n\n\nJörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation.", "### Acknowledgements\n\n\nThis translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France." ]
[ "TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-Modern Greek (1453-) #license-cc-by-nc-sa-4.0 #region-us \n", "### Dataset Summary\n\n\nThis repository contains a machine-translated Modern Greek version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\nIt is also compatible with the French version of SICK for a combined 3-language NLI task (English, Greek, French) if used together, as they both keep the same column names.", "### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\n* 'pair\\_ID': Sentence pair ID.\n* 'sentence\\_A': Sentence A, also known as premise in other NLI datasets.\n* 'sentence\\_B': Sentence B, also known as hypothesis in other NLI datasets.\n* 'entailment\\_label': textual entailment gold label (NEUTRAL, ENTAILMENT, or CONTRADICTION).\n* 'entailment\\_AB': Entailment label for the A-B order (A\\_neutral\\_B, A\\_entails\\_B, or A\\_contradicts\\_B).\n* 'entailment\\_BA': Entailment label for the B-A order (B\\_neutral\\_A, B\\_entails\\_A, or B\\_contradicts\\_A).\n* 'original\\_SICK\\_sentence\\_A': The original premise from the English source dataset.\n* 'original\\_SICK\\_sentence\\_B': The original hypothesis from the English source dataset.\n* 'sentence\\_A\\_dataset': The dataset from which the original sentence A was extracted (FLICKR vs. SEMEVAL).\n* 'sentence\\_B\\_dataset': The dataset from which the original sentence B was extracted (FLICKR vs. SEMEVAL).", "### Data Splits\n\n\n\nFor the A-B order:\n\n\n\nFor the B-A order:\n\n\n\nDataset Creation\n----------------\n\n\nThe dataset was machine translated from English to Modern Greek using the latest neural machine translation opus-mt-tc-big model available for Modern Greek.\nThe translation of the sentences was carried out on November 26th, 2023.\n\n\nAdditional Information\n----------------------\n\n\nBibTeX:\n\n\n'\n\n\nACL:\n\n\nMarco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zamparelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In *Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)*, pages 216–223, Reykjavik, Iceland. European Language Resources Association (ELRA).\n\n\nJörg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT – Building open translation services for the World. In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. 
European Association for Machine Translation.", "### Acknowledgements\n\n\nThis translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, *Institut Cybersécurité Occitanie*, funded by Région Occitanie, France." ]
[ 80, 209, 61, 324, 250, 85 ]
[ "passage: TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #task_ids-multi-input-text-classification #size_categories-1K<n<10K #language-Modern Greek (1453-) #license-cc-by-nc-sa-4.0 #region-us \n### Dataset Summary\n\n\nThis repository contains a machine-translated Modern Greek version of SICK (Sentences Involving Compositional Knowldedge) dataset. The goal is to predict textual entailment (does sentence A\nimply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). Apart from machine translating the sentence pairs, the rest of information (pair ID, labels, source dataset of each sentence, train/dev/test subset partition) has been left intact as in the original English dataset.\n\n\nThe dataset is here formatted in a similar manner (TSV format) as the widely used XNLI dataset for convenience.\nIt is also compatible with the French version of SICK for a combined 3-language NLI task (English, Greek, French) if used together, as they both keep the same column names.### Supported Tasks and Leaderboards\n\n\nThis dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.\n\n\nDataset Structure\n-----------------" ]
4b6d417268c83e232a77a44926ea803066a4cbb1
# Dataset Card for "bundestagv2" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) ## Dataset Description This is the second version of our bundestag Dataset and the first one that reached a sufficient quality. It has been obtained using the .srt files found on the website of "Parlamentsfernsehen Bundestag" and has been further optimized using forced alignment. ### Dataset Summary Almost 300k Rows of data, filtered to up to 30 seconds of audio. ### Supported Tasks and Leaderboards This dataset is mostly intended for ASR usage. ## Dataset Structure ### Data Instances Every instance ranges from 0 to 30 seconds of audio. ### Data Fields We provide the waveforms and the corresponding transcriptions. ### Data Splits This is currently one large split, so consider applying train_test_split first before using it for training. ## Dataset Creation ### Source Data #### Initial Data Collection and Normalization Aligned using oliverguhr/wav2vec2-large-xlsr-53-german-cv9 ## Considerations for Using the Data ### Discussion of Biases There are quite a few dialects present throughout the dataset. ### Other Known Limitations A lot of the sessions were held online, as such the audio quality varies drastically and is sometimes too bad to ensure good transcriptions. The .srt files used are not flawless, as such there might be a few errors in the dataset. Additionally, the alignment will be off from time to time, resulting in missing speech with the full transcript which could increase hallucinations. ## Additional Information ### Licensing Information This dataset is currently gated and will remain this as long as we are uncertain about the legality of republishing audio of the German "Bundestag".
Yaasr/bundestagv2
[ "task_categories:automatic-speech-recognition", "language:de", "region:us" ]
2023-11-26T17:14:12+00:00
{"language": ["de"], "task_categories": ["automatic-speech-recognition"], "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 74359398844.398, "num_examples": 292462}], "download_size": 107587361654, "dataset_size": 74359398844.398}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-23T23:20:49+00:00
[]
[ "de" ]
TAGS #task_categories-automatic-speech-recognition #language-German #region-us
# Dataset Card for "bundestagv2" ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Source Data - Considerations for Using the Data - Discussion of Biases - Other Known Limitations - Additional Information - Licensing Information ## Dataset Description This is the second version of our bundestag Dataset and the first one that reached a sufficient quality. It has been obtained using the .srt files found on the website of "Parlamentsfernsehen Bundestag" and has been further optimized using forced alignment. ### Dataset Summary Almost 300k Rows of data, filtered to up to 30 seconds of audio. ### Supported Tasks and Leaderboards This dataset is mostly intended for ASR usage. ## Dataset Structure ### Data Instances Every instance ranges from 0 to 30 seconds of audio. ### Data Fields We provide the waveforms and the corresponding transcriptions. ### Data Splits This is currently one large split, so consider applying train_test_split first before using it for training. ## Dataset Creation ### Source Data #### Initial Data Collection and Normalization Aligned using oliverguhr/wav2vec2-large-xlsr-53-german-cv9 ## Considerations for Using the Data ### Discussion of Biases There are quite a few dialects present throughout the dataset. ### Other Known Limitations A lot of the sessions were held online, as such the audio quality varies drastically and is sometimes too bad to ensure good transcriptions. The .srt files used are not flawless, as such there might be a few errors in the dataset. Additionally, the alignment will be off from time to time, resulting in missing speech with the full transcript which could increase hallucinations. ## Additional Information ### Licensing Information This dataset is currently gated and will remain this as long as we are uncertain about the legality of republishing audio of the German "Bundestag".
[ "# Dataset Card for \"bundestagv2\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Source Data\n- Considerations for Using the Data\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Licensing Information", "## Dataset Description\n\nThis is the second version of our bundestag Dataset and the first one that reached a sufficient quality. \nIt has been obtained using the .srt files found on the website of \"Parlamentsfernsehen Bundestag\" and has been further optimized using forced alignment.", "### Dataset Summary\n\nAlmost 300k Rows of data, filtered to up to 30 seconds of audio.", "### Supported Tasks and Leaderboards\n\nThis dataset is mostly intended for ASR usage.", "## Dataset Structure", "### Data Instances\n\nEvery instance ranges from 0 to 30 seconds of audio.", "### Data Fields\n\nWe provide the waveforms and the corresponding transcriptions.", "### Data Splits\n\nThis is currently one large split, so consider applying train_test_split first before using it for training.", "## Dataset Creation", "### Source Data", "#### Initial Data Collection and Normalization\n\nAligned using oliverguhr/wav2vec2-large-xlsr-53-german-cv9", "## Considerations for Using the Data", "### Discussion of Biases\n\nThere are quite a few dialects present throughout the dataset.", "### Other Known Limitations\n\nA lot of the sessions were held online, as such the audio quality varies drastically and is sometimes too bad to ensure good transcriptions. The .srt files used are not flawless, as such there might be a few errors in the dataset. Additionally, the alignment will be off from time to time, resulting in missing speech with the full transcript which could increase hallucinations.", "## Additional Information", "### Licensing Information\n\nThis dataset is currently gated and will remain this as long as we are uncertain about the legality of republishing audio of the German \"Bundestag\"." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-German #region-us \n", "# Dataset Card for \"bundestagv2\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Source Data\n- Considerations for Using the Data\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Licensing Information", "## Dataset Description\n\nThis is the second version of our bundestag Dataset and the first one that reached a sufficient quality. \nIt has been obtained using the .srt files found on the website of \"Parlamentsfernsehen Bundestag\" and has been further optimized using forced alignment.", "### Dataset Summary\n\nAlmost 300k Rows of data, filtered to up to 30 seconds of audio.", "### Supported Tasks and Leaderboards\n\nThis dataset is mostly intended for ASR usage.", "## Dataset Structure", "### Data Instances\n\nEvery instance ranges from 0 to 30 seconds of audio.", "### Data Fields\n\nWe provide the waveforms and the corresponding transcriptions.", "### Data Splits\n\nThis is currently one large split, so consider applying train_test_split first before using it for training.", "## Dataset Creation", "### Source Data", "#### Initial Data Collection and Normalization\n\nAligned using oliverguhr/wav2vec2-large-xlsr-53-german-cv9", "## Considerations for Using the Data", "### Discussion of Biases\n\nThere are quite a few dialects present throughout the dataset.", "### Other Known Limitations\n\nA lot of the sessions were held online, as such the audio quality varies drastically and is sometimes too bad to ensure good transcriptions. The .srt files used are not flawless, as such there might be a few errors in the dataset. Additionally, the alignment will be off from time to time, resulting in missing speech with the full transcript which could increase hallucinations.", "## Additional Information", "### Licensing Information\n\nThis dataset is currently gated and will remain this as long as we are uncertain about the legality of republishing audio of the German \"Bundestag\"." ]
[ 26, 12, 86, 65, 25, 21, 6, 18, 20, 29, 5, 4, 39, 8, 21, 98, 5, 41 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-German #region-us \n# Dataset Card for \"bundestagv2\"## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Source Data\n- Considerations for Using the Data\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Licensing Information## Dataset Description\n\nThis is the second version of our bundestag Dataset and the first one that reached a sufficient quality. \nIt has been obtained using the .srt files found on the website of \"Parlamentsfernsehen Bundestag\" and has been further optimized using forced alignment.### Dataset Summary\n\nAlmost 300k Rows of data, filtered to up to 30 seconds of audio.### Supported Tasks and Leaderboards\n\nThis dataset is mostly intended for ASR usage.## Dataset Structure### Data Instances\n\nEvery instance ranges from 0 to 30 seconds of audio.### Data Fields\n\nWe provide the waveforms and the corresponding transcriptions.### Data Splits\n\nThis is currently one large split, so consider applying train_test_split first before using it for training.## Dataset Creation### Source Data#### Initial Data Collection and Normalization\n\nAligned using oliverguhr/wav2vec2-large-xlsr-53-german-cv9## Considerations for Using the Data### Discussion of Biases\n\nThere are quite a few dialects present throughout the dataset.### Other Known Limitations\n\nA lot of the sessions were held online, as such the audio quality varies drastically and is sometimes too bad to ensure good transcriptions. The .srt files used are not flawless, as such there might be a few errors in the dataset. Additionally, the alignment will be off from time to time, resulting in missing speech with the full transcript which could increase hallucinations.## Additional Information" ]
9744dbd9ac608954336243052086800eb2be2c66
# Dataset Card for "Thermostability_reg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lhallee/Thermostability_reg
[ "region:us" ]
2023-11-26T18:05:18+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "seqs", "dtype": "string"}, {"name": "labels", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 2990210, "num_examples": 5056}, {"name": "valid", "num_bytes": 373605, "num_examples": 639}, {"name": "test", "num_bytes": 795351, "num_examples": 1336}], "download_size": 4142780, "dataset_size": 4159166}}
2023-11-26T18:05:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Thermostability_reg" More Information needed
[ "# Dataset Card for \"Thermostability_reg\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Thermostability_reg\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Thermostability_reg\"\n\nMore Information needed" ]
395a8e00f5a2d8b7c3f1cd2bb676ebdb78caa5cf
# Dataset Card for "Thermostability_fold" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lhallee/Thermostability_fold
[ "region:us" ]
2023-11-26T18:05:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "seqs", "dtype": "string"}, {"name": "labels", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 5920444, "num_examples": 5056}, {"name": "valid", "num_bytes": 739542, "num_examples": 639}, {"name": "test", "num_bytes": 1574670, "num_examples": 1336}], "download_size": 6866906, "dataset_size": 8234656}}
2023-11-26T18:05:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Thermostability_fold" More Information needed
[ "# Dataset Card for \"Thermostability_fold\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Thermostability_fold\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Thermostability_fold\"\n\nMore Information needed" ]
6700cc76d72155080d4f6dc6f2a5b16845a3010e
# Dataset Card for "anoel-11-26" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Chunt0/anoel-11-26
[ "region:us" ]
2023-11-26T18:12:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19715096.0, "num_examples": 78}], "download_size": 19715409, "dataset_size": 19715096.0}}
2023-11-26T18:45:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "anoel-11-26" More Information needed
[ "# Dataset Card for \"anoel-11-26\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"anoel-11-26\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"anoel-11-26\"\n\nMore Information needed" ]
e3ffed903b9dad7fc9273a484d2cd396ed1157ff
# Annotation Annotated this [dataset](https://www.kaggle.com/datasets/srbhshinde/flickr8k-sau) by classifying the images into slow, medium, or fast depending on the pace of suitable background music.
onlycaps/flickr8k-sau-pace-annotated
[ "task_categories:image-classification", "license:mit", "region:us" ]
2023-11-26T18:20:45+00:00
{"license": "mit", "task_categories": ["image-classification"]}
2023-11-26T18:30:31+00:00
[]
[]
TAGS #task_categories-image-classification #license-mit #region-us
# Annotation Annotated this dataset by classifying the images into slow, medium, or fast depending on the pace of suitable background music.
[ "# Annotation\n\nAnnotated this dataset by clasifying the images into slow, medium or fast depending on the suitable paced background music." ]
[ "TAGS\n#task_categories-image-classification #license-mit #region-us \n", "# Annotation\n\nAnnotated this dataset by clasifying the images into slow, medium or fast depending on the suitable paced background music." ]
[ 22, 31 ]
[ "passage: TAGS\n#task_categories-image-classification #license-mit #region-us \n# Annotation\n\nAnnotated this dataset by clasifying the images into slow, medium or fast depending on the suitable paced background music." ]
6376281fe19ff5bcfd13c4a6ed851bd4c5339b24
# Dataset Card for "elephants" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mespinosami/elephants
[ "region:us" ]
2023-11-26T18:53:42+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "eles", "1": "no_eles"}}}}], "splits": [{"name": "train", "num_bytes": 14514122908.549025, "num_examples": 159283}, {"name": "test", "num_bytes": 3662606794.077976, "num_examples": 39821}], "download_size": 17959333418, "dataset_size": 18176729702.627}}
2023-11-26T19:16:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "elephants" More Information needed
[ "# Dataset Card for \"elephants\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"elephants\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"elephants\"\n\nMore Information needed" ]
51e167b4a21fddf4ae218636e0ffda1a3e68d79d
# Dataset Card for "evaluation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
HossainRabby/evaluation
[ "region:us" ]
2023-11-26T19:50:31+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "predicted_answer", "dtype": "string"}, {"name": "target_answer", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 102827, "num_examples": 82}], "download_size": 0, "dataset_size": 102827}}
2023-11-27T13:11:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "evaluation" More Information needed
[ "# Dataset Card for \"evaluation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"evaluation\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"evaluation\"\n\nMore Information needed" ]
73a30aab435561044cbd3decef5279d5153c40e7
# Social Network: MOOC User Action Dataset [Link to original dataset](https://snap.stanford.edu/data/act-mooc.html) The MOOC user action dataset represents the actions taken by users on a popular MOOC platform. The actions have attributes and timestamps. Additionally, each action has a binary label, representing whether the user dropped out of the course after this action, i.e., whether this is the last action of the user. The dataset has been generated as part of the research project on advanced user modeling and recommender systems. The original dataset had 3 distinct CSV files; they are joined into a single file here. Columns: * ACTIONID: a unique id for each action. * USERID: a unique id for each user. * TARGETID: a unique id for each target activity. * FEATUREx: a feature value associated with the action. There are four in total, making it a 4-dimensional feature vector. * TIMESTAMP: timestamp for the action in seconds from the beginning. * LABEL: a binary label indicating whether the student drops out after the action. The value is 1 for drop-out actions, 0 otherwise.
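A small exploration sketch (not from the original project); the file name is hypothetical, and the feature columns are selected by prefix since only the FEATUREx naming pattern is given above.

```python
import pandas as pd

# Hypothetical file name; the column names follow the description above.
df = pd.read_csv("mooc_actions.csv")

# Overall share of actions that are drop-out actions (LABEL == 1).
print("drop-out action rate:", df["LABEL"].mean())

# Per-user summary: number of actions and whether the user eventually dropped out.
per_user = df.groupby("USERID").agg(
    n_actions=("ACTIONID", "count"),
    dropped_out=("LABEL", "max"),
)
print(per_user.head())

# The 4-dimensional feature vector attached to each action.
feature_cols = [c for c in df.columns if c.startswith("FEATURE")]
features = df[feature_cols].to_numpy()
```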
sswt/stanford-act-mooc
[ "region:us" ]
2023-11-26T19:58:55+00:00
{}
2023-11-27T08:25:13+00:00
[]
[]
TAGS #region-us
# Social Network: MOOC User Action Dataset Link to original dataset The MOOC user action dataset represents the actions taken by users on a popular MOOC platform. The actions have attributes and timestamps. Additionally, each action has a binary label, representing whether the user dropped out of the course after this action, i.e., whether this is the last action of the user. The dataset has been generated as part of the research project on advanced user modeling and recommender systems. The original dataset had 3 distinct CSV files; they are joined into a single file here. Columns: * ACTIONID: a unique id for each action. * USERID: a unique id for each user. * TARGETID: a unique id for each target activity. * FEATUREx: a feature value associated with the action. There are four in total, making it a 4-dimensional feature vector. * TIMESTAMP: timestamp for the action in seconds from the beginning. * LABEL: a binary label indicating whether the student drops out after the action. The value is 1 for drop-out actions, 0 otherwise.
[ "# Social Network: MOOC User Action Dataset\n\nLink to original dataset)\n\nThe MOOC user action dataset represents the actions taken by users on a popular MOOC platform. The actions have attributes and timestamps. Additionally, each action has a binary label, representing whether the user dropped-out of the course after this action, i.e., whether this is last action of the user.\n\nThe dataset have been generated as part of the research project on advanced user modeling and recommender systems.\n\nOriginal dataset had 3 distinct csv files, they are joined to a single file here.\n\nColumns:\n\n* ACTIONID: a unique id for each action.\n* USERID: a unique id for each user.\n* TARGETID: a unique id for each target activity.\n* FEATUREx: a feature value associated with the action. Total four in count, making it a 4-dimensional feature vector.\n* TIMESTAMP: timestamp for the action in seconds from the beginning.\n* LABEL: a binary label indicating whether the student drops-out after the action. The value is 1 for drop-out actions, 0 otherwise." ]
[ "TAGS\n#region-us \n", "# Social Network: MOOC User Action Dataset\n\nLink to original dataset)\n\nThe MOOC user action dataset represents the actions taken by users on a popular MOOC platform. The actions have attributes and timestamps. Additionally, each action has a binary label, representing whether the user dropped-out of the course after this action, i.e., whether this is last action of the user.\n\nThe dataset have been generated as part of the research project on advanced user modeling and recommender systems.\n\nOriginal dataset had 3 distinct csv files, they are joined to a single file here.\n\nColumns:\n\n* ACTIONID: a unique id for each action.\n* USERID: a unique id for each user.\n* TARGETID: a unique id for each target activity.\n* FEATUREx: a feature value associated with the action. Total four in count, making it a 4-dimensional feature vector.\n* TIMESTAMP: timestamp for the action in seconds from the beginning.\n* LABEL: a binary label indicating whether the student drops-out after the action. The value is 1 for drop-out actions, 0 otherwise." ]
[ 6, 252 ]
[ "passage: TAGS\n#region-us \n# Social Network: MOOC User Action Dataset\n\nLink to original dataset)\n\nThe MOOC user action dataset represents the actions taken by users on a popular MOOC platform. The actions have attributes and timestamps. Additionally, each action has a binary label, representing whether the user dropped-out of the course after this action, i.e., whether this is last action of the user.\n\nThe dataset have been generated as part of the research project on advanced user modeling and recommender systems.\n\nOriginal dataset had 3 distinct csv files, they are joined to a single file here.\n\nColumns:\n\n* ACTIONID: a unique id for each action.\n* USERID: a unique id for each user.\n* TARGETID: a unique id for each target activity.\n* FEATUREx: a feature value associated with the action. Total four in count, making it a 4-dimensional feature vector.\n* TIMESTAMP: timestamp for the action in seconds from the beginning.\n* LABEL: a binary label indicating whether the student drops-out after the action. The value is 1 for drop-out actions, 0 otherwise." ]
7b9db8ea786738263bc38b35f73f091479ebbdf0
# Strike-T1 dataset_info: features: - name: image dtype: image - name: cr dtype: int64 splits: - name: train num_bytes: 162832.0 num_examples: 12 download_size: 70261 dataset_size: 162832.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
hsienchen/strike-T1
[ "task_categories:feature-extraction", "size_categories:1M<n<10M", "language:av", "language:ae", "license:mit", "code", "medical", "region:us" ]
2023-11-26T20:05:22+00:00
{"language": ["av", "ae"], "license": "mit", "size_categories": ["1M<n<10M"], "task_categories": ["feature-extraction"], "pretty_name": "Strike_thru2", "tags": ["code", "medical"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "cr", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 219666.0, "num_examples": 12}], "download_size": 180506, "dataset_size": 219666.0}}
2023-11-26T20:19:45+00:00
[]
[ "av", "ae" ]
TAGS #task_categories-feature-extraction #size_categories-1M<n<10M #language-Avaric #language-Avestan #license-mit #code #medical #region-us
# Strike-T1 dataset_info: features: - name: image dtype: image - name: cr dtype: int64 splits: - name: train num_bytes: 162832.0 num_examples: 12 download_size: 70261 dataset_size: 162832.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
[ "# Strike-T1\n\ndataset_info:\n features:\n - name: image\n dtype: image\n - name: cr\n dtype: int64\n splits:\n - name: train\n num_bytes: 162832.0\n num_examples: 12\n download_size: 70261\n dataset_size: 162832.0\nconfigs:\n- config_name: default\n data_files:\n - split: train\n path: data/train-*\n---" ]
[ "TAGS\n#task_categories-feature-extraction #size_categories-1M<n<10M #language-Avaric #language-Avestan #license-mit #code #medical #region-us \n", "# Strike-T1\n\ndataset_info:\n features:\n - name: image\n dtype: image\n - name: cr\n dtype: int64\n splits:\n - name: train\n num_bytes: 162832.0\n num_examples: 12\n download_size: 70261\n dataset_size: 162832.0\nconfigs:\n- config_name: default\n data_files:\n - split: train\n path: data/train-*\n---" ]
[ 52, 97 ]
[ "passage: TAGS\n#task_categories-feature-extraction #size_categories-1M<n<10M #language-Avaric #language-Avestan #license-mit #code #medical #region-us \n# Strike-T1\n\ndataset_info:\n features:\n - name: image\n dtype: image\n - name: cr\n dtype: int64\n splits:\n - name: train\n num_bytes: 162832.0\n num_examples: 12\n download_size: 70261\n dataset_size: 162832.0\nconfigs:\n- config_name: default\n data_files:\n - split: train\n path: data/train-*\n---" ]
3b634eabb55b6268e8bed9c8a54d8335835cc571
# Dataset of ch'en (Arknights) This is the dataset of ch'en (Arknights), containing 200 images and their tags. Images are crawled from many sites (e.g. danbooru, pixiv, zerochan ...), the auto-crawling system is powered by [DeepGHS Team](https://github.com/deepghs)([huggingface organization](https://huggingface.co/deepghs)).([LittleAppleWebUI](https://github.com/LittleApple-fp16/LittleAppleWebUI)) | Name | Images | Download | Description | |:----------------|---------:|:----------------------------------------|:-----------------------------------------------------------------------------------------| | raw | 200 | [Download](dataset-raw.zip) | Raw data with meta information. | | raw-stage3 | 490 | [Download](dataset-raw-stage3.zip) | 3-stage cropped raw data with meta information. | | raw-stage3-eyes | 605 | [Download](dataset-raw-stage3-eyes.zip) | 3-stage cropped (with eye-focus) raw data with meta information. | | 384x512 | 200 | [Download](dataset-384x512.zip) | 384x512 aligned dataset. | | 512x704 | 200 | [Download](dataset-512x704.zip) | 512x704 aligned dataset. | | 640x880 | 200 | [Download](dataset-640x880.zip) | 640x880 aligned dataset. | | stage3-640 | 490 | [Download](dataset-stage3-640.zip) | 3-stage cropped dataset with the shorter side not exceeding 640 pixels. | | stage3-800 | 490 | [Download](dataset-stage3-800.zip) | 3-stage cropped dataset with the shorter side not exceeding 800 pixels. | | stage3-p512-640 | 339 | [Download](dataset-stage3-p512-640.zip) | 3-stage cropped dataset with the area not less than 512x512 pixels. | | stage3-eyes-640 | 605 | [Download](dataset-stage3-eyes-640.zip) | 3-stage cropped (with eye-focus) dataset with the shorter side not exceeding 640 pixels. | | stage3-eyes-800 | 605 | [Download](dataset-stage3-eyes-800.zip) | 3-stage cropped (with eye-focus) dataset with the shorter side not exceeding 800 pixels. |
AppleHarem/ch_en_arknights
[ "task_categories:text-to-image", "size_categories:n<1K", "license:mit", "art", "not-for-all-audiences", "region:us" ]
2023-11-26T20:29:58+00:00
{"license": "mit", "size_categories": ["n<1K"], "task_categories": ["text-to-image"], "tags": ["art", "not-for-all-audiences"]}
2023-11-26T20:30:48+00:00
[]
[]
TAGS #task_categories-text-to-image #size_categories-n<1K #license-mit #art #not-for-all-audiences #region-us
Dataset of ch'en (Arknights) ============================ This is the dataset of ch'en (Arknights), containing 200 images and their tags. Images are crawled from many sites (e.g. danbooru, pixiv, zerochan ...); the auto-crawling system is powered by the DeepGHS Team (huggingface organization). (LittleAppleWebUI)
[]
[ "TAGS\n#task_categories-text-to-image #size_categories-n<1K #license-mit #art #not-for-all-audiences #region-us \n" ]
[ 44 ]
[ "passage: TAGS\n#task_categories-text-to-image #size_categories-n<1K #license-mit #art #not-for-all-audiences #region-us \n" ]
d34776ba21ef8f02d1349349adedcb959060f3f5
# Dataset Card for "hii" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
HossainRabby/UpdatedDataset
[ "region:us" ]
2023-11-26T20:30:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 72929903.08721887, "num_examples": 14766}, {"name": "test", "num_bytes": 8104968.91278113, "num_examples": 1641}], "download_size": 26912462, "dataset_size": 81034872.0}}
2023-11-28T18:25:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hii" More Information needed
[ "# Dataset Card for \"hii\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hii\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hii\"\n\nMore Information needed" ]
a2061d30db07e32f34f5d50160bb77b8f4891f78
# Weather prediction prototype database. This database was made using data provided by KMI. This database will only be used to train a prototype. ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **KMI** ## Dataset Structure Normalized columns: - timestamp - air_pressure - relative_humidity - precipitation - wind_speed - wind_direction More information about these columns can be found in the `information_10min.txt` file. <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
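A minimal preprocessing sketch for the prototype (assumptions: the observations live in a CSV file with the normalized column names listed above and arrive at a 10-minute cadence, as the `information_10min.txt` reference suggests; the file name and the hourly target are illustrative).

```python
import pandas as pd

# Hypothetical file name; columns follow the normalized schema listed above.
df = pd.read_csv("kmi_observations.csv", parse_dates=["timestamp"])
df = df.set_index("timestamp").sort_index()

# Aggregate the assumed 10-minute observations to hourly values for a simple prototype.
hourly = df.resample("1h").agg({
    "air_pressure": "mean",
    "relative_humidity": "mean",
    "precipitation": "sum",
    "wind_speed": "mean",
    "wind_direction": "mean",  # naive mean; a real prototype should use sin/cos components
})

# Example target: precipitation during the next hour, predicted from the current hour.
hourly["precipitation_next_hour"] = hourly["precipitation"].shift(-1)
```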
syntaxnoob/weather-prediction-prototype-aws
[ "size_categories:100K<n<1M", "license:unlicense", "region:us" ]
2023-11-26T20:47:42+00:00
{"license": "unlicense", "size_categories": ["100K<n<1M"]}
2023-11-30T13:24:58+00:00
[]
[]
TAGS #size_categories-100K<n<1M #license-unlicense #region-us
# Weather prediction prototype database. This database was made using data provided by KMI. This database will only be used to train a prototype. ## Dataset Details ### Dataset Description ### Dataset Sources [optional] - KMI ## Dataset Structure Normalized columns: - timestamp - air_pressure - relative_humidity - precipitation - wind_speed - wind_direction More information about these columns can be found in the 'information_10min.txt' file.
[ "# Weather prediction prototype database.\n\nThis database was made using data provided by KMI.\n\nThis database will only be used to train a prototype.", "## Dataset Details", "### Dataset Description", "### Dataset Sources [optional]\n\n\n- KMI", "## Dataset Structure\n\nNormalized columns: \n- timestamp\n- air_pressure\n- relative_humidity\n- precipitation\n- wind_speed\n- wind_direction\n\nMore information about these columns can be found in the 'information_10min.txt' file." ]
[ "TAGS\n#size_categories-100K<n<1M #license-unlicense #region-us \n", "# Weather prediction prototype database.\n\nThis database was made using data provided by KMI.\n\nThis database will only be used to train a prototype.", "## Dataset Details", "### Dataset Description", "### Dataset Sources [optional]\n\n\n- KMI", "## Dataset Structure\n\nNormalized columns: \n- timestamp\n- air_pressure\n- relative_humidity\n- precipitation\n- wind_speed\n- wind_direction\n\nMore information about these columns can be found in the 'information_10min.txt' file." ]
[ 25, 31, 4, 5, 13, 61 ]
[ "passage: TAGS\n#size_categories-100K<n<1M #license-unlicense #region-us \n# Weather prediction prototype database.\n\nThis database was made using data provided by KMI.\n\nThis database will only be used to train a prototype.## Dataset Details### Dataset Description### Dataset Sources [optional]\n\n\n- KMI## Dataset Structure\n\nNormalized columns: \n- timestamp\n- air_pressure\n- relative_humidity\n- precipitation\n- wind_speed\n- wind_direction\n\nMore information about these columns can be found in the 'information_10min.txt' file." ]
5ef31251dcb6a2038bcaa41fb4e4447706568454
![image/png](https://cdn-uploads.huggingface.co/production/uploads/6563af99abefee6a6b258ce2/XmvBb6Z8G6eFzM7CiDTdW.png) This is a GPT-4 generated question-and-answer dataset based on [The Elements](https://www.amazon.com/Elements-Visual-Exploration-Every-Universe/dp/1579128955/) by [Theodore Gray](https://home.theodoregray.com/).
Tony-Yuan/TheElements
[ "license:apache-2.0", "region:us" ]
2023-11-26T20:52:53+00:00
{"license": "apache-2.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": ["train.csv"]}]}]}
2023-11-27T03:05:55+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
!image/png This is a GPT-4 generated question-and-answer dataset based on The Elements by Theodore Gray.
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
a4332e42473cb51aeacfeea5df763e169f48ef10
# Bangumi Image Base of Great Pretender This is the image base of bangumi Great Pretender, we detected 50 characters, 3820 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 178 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 20 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 647 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 471 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 86 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 79 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 80 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 100 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 110 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 116 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 48 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 35 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 58 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 23 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 30 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 61 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 33 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 59 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 132 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 24 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 9 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 44 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 25 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 42 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 25 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 25 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 83 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 14 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 265 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 16 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 16 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 24 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 16 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 22 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 38 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 37 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 7 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | N/A | | 37 | 18 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 40 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 15 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 281 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 21 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 8 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 11 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | 
![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 10 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 18 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 98 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 76 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 8 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | noise | 118 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
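For reference, here is one hedged way to pull a single character's archive from this repo programmatically. The repo id and the `<cluster>/dataset.zip` layout are taken from the table above; the chosen cluster id, the output folder name, and the use of `huggingface_hub` are illustrative assumptions rather than an official loading script.

```python
# Hedged sketch: download and unpack one character cluster of this image base.
# Assumes `huggingface_hub` is installed; the "<cluster>/dataset.zip" layout
# matches the download links in the table above.
from zipfile import ZipFile

from huggingface_hub import hf_hub_download

cluster_id = "40"  # hypothetical pick; any cluster id listed above works

archive = hf_hub_download(
    repo_id="BangumiBase/greatpretender",
    filename=f"{cluster_id}/dataset.zip",
    repo_type="dataset",
)

with ZipFile(archive) as zf:
    zf.extractall(f"greatpretender_{cluster_id}")  # extracted images land here
```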
BangumiBase/greatpretender
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-26T21:59:38+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-27T00:43:49+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Great Pretender ===================================== This is the image base of bangumi Great Pretender; we detected 50 characters and 3820 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still be noisy in practice. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
273cb1eec620664ae334312a505db42b0e4fbd85
# Bangumi Image Base of Magi - The Labyrinth Of Magic This is the image base of bangumi Magi - The Labyrinth of Magic, we detected 100 characters, 7708 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1270 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 16 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 22 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 41 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 183 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 107 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 345 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 68 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 47 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 
6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 130 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 58 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 89 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 26 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 26 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 45 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 58 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 59 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 79 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 35 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 24 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 51 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 
3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 65 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 90 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 42 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 50 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 40 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 58 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 38 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 64 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 40 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 7 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | N/A | | 31 | 34 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 44 | [Download](32/dataset.zip) 
| ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 33 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 65 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 79 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 16 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 26 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 15 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 23 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 30 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 162 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 413 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 27 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) 
| ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 24 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 15 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 52 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 25 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 21 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 75 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 20 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 188 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 23 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 29 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 55 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 24 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 
3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 13 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 36 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 7 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | N/A | | 59 | 16 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 22 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 28 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 18 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 1272 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 88 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 159 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 15 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 8](66/preview_8.png) | | 67 | 35 | 
[Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 85 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) | | 69 | 21 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 22 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) | | 71 | 20 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | ![preview 7](71/preview_7.png) | ![preview 8](71/preview_8.png) | | 72 | 15 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | ![preview 7](72/preview_7.png) | ![preview 8](72/preview_8.png) | | 73 | 11 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) | | 74 | 14 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | ![preview 7](74/preview_7.png) | ![preview 8](74/preview_8.png) | | 75 | 15 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | ![preview 7](75/preview_7.png) | ![preview 8](75/preview_8.png) | | 76 | 14 | [Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | ![preview 7](76/preview_7.png) | ![preview 8](76/preview_8.png) | | 77 | 29 | [Download](77/dataset.zip) | ![preview 1](77/preview_1.png) | ![preview 2](77/preview_2.png) | ![preview 3](77/preview_3.png) | ![preview 4](77/preview_4.png) | ![preview 5](77/preview_5.png) | ![preview 6](77/preview_6.png) | ![preview 7](77/preview_7.png) | ![preview 8](77/preview_8.png) | | 78 | 14 | [Download](78/dataset.zip) | ![preview 1](78/preview_1.png) | ![preview 2](78/preview_2.png) | ![preview 3](78/preview_3.png) | ![preview 4](78/preview_4.png) | ![preview 5](78/preview_5.png) | 
![preview 6](78/preview_6.png) | ![preview 7](78/preview_7.png) | ![preview 8](78/preview_8.png) | | 79 | 7 | [Download](79/dataset.zip) | ![preview 1](79/preview_1.png) | ![preview 2](79/preview_2.png) | ![preview 3](79/preview_3.png) | ![preview 4](79/preview_4.png) | ![preview 5](79/preview_5.png) | ![preview 6](79/preview_6.png) | ![preview 7](79/preview_7.png) | N/A | | 80 | 11 | [Download](80/dataset.zip) | ![preview 1](80/preview_1.png) | ![preview 2](80/preview_2.png) | ![preview 3](80/preview_3.png) | ![preview 4](80/preview_4.png) | ![preview 5](80/preview_5.png) | ![preview 6](80/preview_6.png) | ![preview 7](80/preview_7.png) | ![preview 8](80/preview_8.png) | | 81 | 11 | [Download](81/dataset.zip) | ![preview 1](81/preview_1.png) | ![preview 2](81/preview_2.png) | ![preview 3](81/preview_3.png) | ![preview 4](81/preview_4.png) | ![preview 5](81/preview_5.png) | ![preview 6](81/preview_6.png) | ![preview 7](81/preview_7.png) | ![preview 8](81/preview_8.png) | | 82 | 16 | [Download](82/dataset.zip) | ![preview 1](82/preview_1.png) | ![preview 2](82/preview_2.png) | ![preview 3](82/preview_3.png) | ![preview 4](82/preview_4.png) | ![preview 5](82/preview_5.png) | ![preview 6](82/preview_6.png) | ![preview 7](82/preview_7.png) | ![preview 8](82/preview_8.png) | | 83 | 28 | [Download](83/dataset.zip) | ![preview 1](83/preview_1.png) | ![preview 2](83/preview_2.png) | ![preview 3](83/preview_3.png) | ![preview 4](83/preview_4.png) | ![preview 5](83/preview_5.png) | ![preview 6](83/preview_6.png) | ![preview 7](83/preview_7.png) | ![preview 8](83/preview_8.png) | | 84 | 11 | [Download](84/dataset.zip) | ![preview 1](84/preview_1.png) | ![preview 2](84/preview_2.png) | ![preview 3](84/preview_3.png) | ![preview 4](84/preview_4.png) | ![preview 5](84/preview_5.png) | ![preview 6](84/preview_6.png) | ![preview 7](84/preview_7.png) | ![preview 8](84/preview_8.png) | | 85 | 11 | [Download](85/dataset.zip) | ![preview 1](85/preview_1.png) | ![preview 2](85/preview_2.png) | ![preview 3](85/preview_3.png) | ![preview 4](85/preview_4.png) | ![preview 5](85/preview_5.png) | ![preview 6](85/preview_6.png) | ![preview 7](85/preview_7.png) | ![preview 8](85/preview_8.png) | | 86 | 9 | [Download](86/dataset.zip) | ![preview 1](86/preview_1.png) | ![preview 2](86/preview_2.png) | ![preview 3](86/preview_3.png) | ![preview 4](86/preview_4.png) | ![preview 5](86/preview_5.png) | ![preview 6](86/preview_6.png) | ![preview 7](86/preview_7.png) | ![preview 8](86/preview_8.png) | | 87 | 12 | [Download](87/dataset.zip) | ![preview 1](87/preview_1.png) | ![preview 2](87/preview_2.png) | ![preview 3](87/preview_3.png) | ![preview 4](87/preview_4.png) | ![preview 5](87/preview_5.png) | ![preview 6](87/preview_6.png) | ![preview 7](87/preview_7.png) | ![preview 8](87/preview_8.png) | | 88 | 89 | [Download](88/dataset.zip) | ![preview 1](88/preview_1.png) | ![preview 2](88/preview_2.png) | ![preview 3](88/preview_3.png) | ![preview 4](88/preview_4.png) | ![preview 5](88/preview_5.png) | ![preview 6](88/preview_6.png) | ![preview 7](88/preview_7.png) | ![preview 8](88/preview_8.png) | | 89 | 52 | [Download](89/dataset.zip) | ![preview 1](89/preview_1.png) | ![preview 2](89/preview_2.png) | ![preview 3](89/preview_3.png) | ![preview 4](89/preview_4.png) | ![preview 5](89/preview_5.png) | ![preview 6](89/preview_6.png) | ![preview 7](89/preview_7.png) | ![preview 8](89/preview_8.png) | | 90 | 8 | [Download](90/dataset.zip) | ![preview 1](90/preview_1.png) | ![preview 2](90/preview_2.png) | ![preview 
3](90/preview_3.png) | ![preview 4](90/preview_4.png) | ![preview 5](90/preview_5.png) | ![preview 6](90/preview_6.png) | ![preview 7](90/preview_7.png) | ![preview 8](90/preview_8.png) | | 91 | 21 | [Download](91/dataset.zip) | ![preview 1](91/preview_1.png) | ![preview 2](91/preview_2.png) | ![preview 3](91/preview_3.png) | ![preview 4](91/preview_4.png) | ![preview 5](91/preview_5.png) | ![preview 6](91/preview_6.png) | ![preview 7](91/preview_7.png) | ![preview 8](91/preview_8.png) | | 92 | 14 | [Download](92/dataset.zip) | ![preview 1](92/preview_1.png) | ![preview 2](92/preview_2.png) | ![preview 3](92/preview_3.png) | ![preview 4](92/preview_4.png) | ![preview 5](92/preview_5.png) | ![preview 6](92/preview_6.png) | ![preview 7](92/preview_7.png) | ![preview 8](92/preview_8.png) | | 93 | 15 | [Download](93/dataset.zip) | ![preview 1](93/preview_1.png) | ![preview 2](93/preview_2.png) | ![preview 3](93/preview_3.png) | ![preview 4](93/preview_4.png) | ![preview 5](93/preview_5.png) | ![preview 6](93/preview_6.png) | ![preview 7](93/preview_7.png) | ![preview 8](93/preview_8.png) | | 94 | 10 | [Download](94/dataset.zip) | ![preview 1](94/preview_1.png) | ![preview 2](94/preview_2.png) | ![preview 3](94/preview_3.png) | ![preview 4](94/preview_4.png) | ![preview 5](94/preview_5.png) | ![preview 6](94/preview_6.png) | ![preview 7](94/preview_7.png) | ![preview 8](94/preview_8.png) | | 95 | 8 | [Download](95/dataset.zip) | ![preview 1](95/preview_1.png) | ![preview 2](95/preview_2.png) | ![preview 3](95/preview_3.png) | ![preview 4](95/preview_4.png) | ![preview 5](95/preview_5.png) | ![preview 6](95/preview_6.png) | ![preview 7](95/preview_7.png) | ![preview 8](95/preview_8.png) | | 96 | 11 | [Download](96/dataset.zip) | ![preview 1](96/preview_1.png) | ![preview 2](96/preview_2.png) | ![preview 3](96/preview_3.png) | ![preview 4](96/preview_4.png) | ![preview 5](96/preview_5.png) | ![preview 6](96/preview_6.png) | ![preview 7](96/preview_7.png) | ![preview 8](96/preview_8.png) | | 97 | 60 | [Download](97/dataset.zip) | ![preview 1](97/preview_1.png) | ![preview 2](97/preview_2.png) | ![preview 3](97/preview_3.png) | ![preview 4](97/preview_4.png) | ![preview 5](97/preview_5.png) | ![preview 6](97/preview_6.png) | ![preview 7](97/preview_7.png) | ![preview 8](97/preview_8.png) | | 98 | 6 | [Download](98/dataset.zip) | ![preview 1](98/preview_1.png) | ![preview 2](98/preview_2.png) | ![preview 3](98/preview_3.png) | ![preview 4](98/preview_4.png) | ![preview 5](98/preview_5.png) | ![preview 6](98/preview_6.png) | N/A | N/A | | noise | 538 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
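Following the card's advice to filter noisy samples before training, the sketch below downloads the full archive and simply skips the noise cluster. It assumes `all.zip` mirrors the per-cluster folder layout shown in the table, with the noise cluster stored under `-1/`; that layout is an assumption, not something this card states explicitly.

```python
# Hedged sketch: fetch the full archive and drop the "-1" noise cluster.
# Assumes all.zip contains one folder per cluster, named as in the table above.
from zipfile import ZipFile

from huggingface_hub import hf_hub_download

archive = hf_hub_download(
    repo_id="BangumiBase/magithelabyrinthofmagic",
    filename="all.zip",
    repo_type="dataset",
)

with ZipFile(archive) as zf:
    keep = [name for name in zf.namelist() if not name.startswith("-1/")]
    zf.extractall("magi_images", members=keep)
```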
BangumiBase/magithelabyrinthofmagic
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-26T22:01:03+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-27T01:34:39+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Magi - The Labyrinth Of Magic =================================================== This is the image base of bangumi Magi - The Labyrinth of Magic; we detected 100 characters and 7708 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still be noisy in practice. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
9b3117b86b52b7e80b49aaa03689ef1a8001bed6
# NLtoCypherDataset ## Description This dataset, named "NLtoCypherDataset", is designed for the task of translating natural language instructions into the Cypher query language. It is particularly useful for training and evaluating models on sequence-to-sequence tasks where the input is a natural language instruction, and the output is the corresponding Cypher query. ## Dataset Structure ### Data Fields - `Instruction`: A string containing a natural language instruction or query. - `Cypher Queries`: The corresponding Cypher query that represents the natural language instruction. ### Data Splits The dataset is divided into two splits: - `train`: Contains 358 samples for training purposes. - `validation`: Comprises 100 samples for evaluating the model's performance.
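To make the field mapping concrete, the sketch below shows one way these columns could feed a sequence-to-sequence pipeline. The column and split names are taken from this card; whether the repo resolves directly through `datasets.load_dataset` is an assumption.

```python
# Hedged sketch: load the splits and map them to (source, target) pairs.
# Column names "Instruction" and "Cypher Queries" are as documented above.
from datasets import load_dataset

ds = load_dataset("MidhunKanadan/NLtoCypherDataset")  # assumes direct loading works

def to_pair(example):
    return {
        "source": example["Instruction"],     # natural-language request
        "target": example["Cypher Queries"],  # corresponding Cypher query
    }

train = ds["train"].map(to_pair)
validation = ds["validation"].map(to_pair)
print(train[0]["source"], "->", train[0]["target"])
```

The resulting `source`/`target` pairs can then be tokenized for whichever encoder-decoder model is being fine-tuned.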
MidhunKanadan/NLtoCypherDataset
[ "license:mit", "region:us" ]
2023-11-26T22:06:35+00:00
{"license": "mit"}
2023-11-26T22:17:20+00:00
[]
[]
TAGS #license-mit #region-us
# NLtoCypherDataset ## Description This dataset, named "NLtoCypherDataset", is designed for the task of translating natural language instructions into Cypher query language. It is particularly useful for training and evaluating models on sequence-to-sequence tasks where the input is a natural language instruction, and the output is the corresponding Cypher query. ## Dataset Structure ### Data Fields - 'Instruction': A string containing a natural language instruction or query. - 'Cypher Queries': The corresponding Cypher query that represents the natural language instruction. ### Data Splits The dataset is divided into two splits: - 'train': Contains [358] samples for training purposes. - 'validation': Comprises [100] samples for evaluating the model's performance.
[ "# NLtoCypherDataset", "## Description\n\nThis dataset, named \"NLtoCypherDataset\", is designed for the task of translating natural language instructions into Cypher query language. It is particularly useful for training and evaluating models on sequence-to-sequence tasks where the input is a natural language instruction, and the output is the corresponding Cypher query.", "## Dataset Structure", "### Data Fields\n\n- 'Instruction': A string containing a natural language instruction or query.\n- 'Cypher Queries': The corresponding Cypher query that represents the natural language instruction.", "### Data Splits\n\nThe dataset is divided into two splits:\n- 'train': Contains [358] samples for training purposes.\n- 'validation': Comprises [100] samples for evaluating the model's performance." ]
[ "TAGS\n#license-mit #region-us \n", "# NLtoCypherDataset", "## Description\n\nThis dataset, named \"NLtoCypherDataset\", is designed for the task of translating natural language instructions into Cypher query language. It is particularly useful for training and evaluating models on sequence-to-sequence tasks where the input is a natural language instruction, and the output is the corresponding Cypher query.", "## Dataset Structure", "### Data Fields\n\n- 'Instruction': A string containing a natural language instruction or query.\n- 'Cypher Queries': The corresponding Cypher query that represents the natural language instruction.", "### Data Splits\n\nThe dataset is divided into two splits:\n- 'train': Contains [358] samples for training purposes.\n- 'validation': Comprises [100] samples for evaluating the model's performance." ]
[ 11, 9, 80, 6, 50, 58 ]
[ "passage: TAGS\n#license-mit #region-us \n# NLtoCypherDataset## Description\n\nThis dataset, named \"NLtoCypherDataset\", is designed for the task of translating natural language instructions into Cypher query language. It is particularly useful for training and evaluating models on sequence-to-sequence tasks where the input is a natural language instruction, and the output is the corresponding Cypher query.## Dataset Structure### Data Fields\n\n- 'Instruction': A string containing a natural language instruction or query.\n- 'Cypher Queries': The corresponding Cypher query that represents the natural language instruction.### Data Splits\n\nThe dataset is divided into two splits:\n- 'train': Contains [358] samples for training purposes.\n- 'validation': Comprises [100] samples for evaluating the model's performance." ]
3a2a34c0c35aac0293fca20e06deeb09b645cf4f
# Pontoon Translations Amazigh subset of [Pontoon Translations](https://huggingface.co/datasets/ayymen/Pontoon-Translations).
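As a rough usage sketch (not part of the original card), one of the declared language-pair configs can be loaded as below. The config names come from this repo's configuration; the exact column names are not documented here, so the code inspects them instead of assuming them.

```python
# Hedged sketch: load the English to Standard Moroccan Tamazight config.
# Config names ("en-zgh", "en-kab", "en-tzm", "en-shi") are declared in the repo.
from datasets import load_dataset

ds = load_dataset("Tamazight-NLP/Pontoon-Translations", "en-zgh", split="train")

print(ds.column_names)  # check the actual source/target column names first
print(ds[0])            # one translation pair
```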
Tamazight-NLP/Pontoon-Translations
[ "task_categories:translation", "task_categories:text2text-generation", "size_categories:10K<n<100K", "language:ber", "language:zgh", "language:kab", "language:tzm", "language:shi", "language:en", "license:mpl-2.0", "region:us" ]
2023-11-26T22:08:41+00:00
{"language": ["ber", "zgh", "kab", "tzm", "shi", "en"], "license": "mpl-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["translation", "text2text-generation"], "pretty_name": "Pontoon Translations", "configs": [{"config_name": "en-zgh", "data_files": "en-zgh.tsv", "default": true}, {"config_name": "en-kab", "data_files": "en-kab.tsv"}, {"config_name": "en-tzm", "data_files": "en-tzm.tsv"}, {"config_name": "en-shi", "data_files": "en-shi.tsv"}]}
2024-01-15T13:58:49+00:00
[]
[ "ber", "zgh", "kab", "tzm", "shi", "en" ]
TAGS #task_categories-translation #task_categories-text2text-generation #size_categories-10K<n<100K #language-ber #language-Standard Moroccan Tamazight #language-Kabyle #language-Central Atlas Tamazight #language-Tachelhit #language-English #license-mpl-2.0 #region-us
# Pontoon Translations Amazigh subset of Pontoon Translations.
[ "# Pontoon Translations\nAmazigh subset of Pontoon Translations." ]
[ "TAGS\n#task_categories-translation #task_categories-text2text-generation #size_categories-10K<n<100K #language-ber #language-Standard Moroccan Tamazight #language-Kabyle #language-Central Atlas Tamazight #language-Tachelhit #language-English #license-mpl-2.0 #region-us \n", "# Pontoon Translations\nAmazigh subset of Pontoon Translations." ]
[ 87, 16 ]
[ "passage: TAGS\n#task_categories-translation #task_categories-text2text-generation #size_categories-10K<n<100K #language-ber #language-Standard Moroccan Tamazight #language-Kabyle #language-Central Atlas Tamazight #language-Tachelhit #language-English #license-mpl-2.0 #region-us \n# Pontoon Translations\nAmazigh subset of Pontoon Translations." ]
344b171f556fa51f0480a0643fa3c94f6e514fc9
``` --- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: sentence1 dtype: string - name: sentence2 dtype: string - name: annotator_labels sequence: string - name: genre dtype: string - name: gold_label dtype: string - name: pairID dtype: string splits: - name: train num_bytes: 92877120 num_examples: 380800 - name: validation num_bytes: 5903876 num_examples: 19392 - name: test num_bytes: 5321727 num_examples: 19040 download_size: 58511174 dataset_size: 104102723 --- ``` # Dataset Card for "MultiNLI_Dutch_translated_with_Marianmt" Translation of the **English** corpus [Multi-Genre Natural Language Inference (MultiNLI) corpus ](https://cims.nyu.edu/~sbowman/multinli/), specifically the *mismatched version*, to **Dutch** using an [Maria NMT model](https://marian-nmt.github.io/), trained by [Helsinki NLP](https://huggingface.co/Helsinki-NLP/opus-mt-en-nl). Note, for reference: Maria NMT is based on [BART](https://huggingface.co/docs/transformers/model_doc/bart), described [here](https://arxiv.org/abs/1910.13461). A complete description of the dataset is given [here](https://cims.nyu.edu/~sbowman/multinli/), and is available on [HuggingFace](https://huggingface.co/datasets/multi_nli_mismatch). # Attribution If you use this dataset please use the following to credit the creators of MultiNLI: ```citation @InProceedings{N18-1101, author = "Williams, Adina and Nangia, Nikita and Bowman, Samuel", title = "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference", booktitle = "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)", year = "2018", publisher = "Association for Computational Linguistics", pages = "1112--1122", location = "New Orleans, Louisiana", url = "http://aclweb.org/anthology/N18-1101" } ``` The creators of the OPUS-MT models: ``` @InProceedings{TiedemannThottingal:EAMT2020, author = {J{\"o}rg Tiedemann and Santhosh Thottingal}, title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld}, booktitle = {Proceedings of the 22nd Annual Conferenec of the European Association for Machine Translation (EAMT)}, year = {2020}, address = {Lisbon, Portugal} } ``` and ``` @misc {van_es_2023, author = { {Bram van Es} }, title = { MultiNLI_Dutch_translated_with_Marianmt (Revision 284d39a) }, year = 2023, url = { https://huggingface.co/datasets/UMCU/MultiNLI_Dutch_translated_with_Marianmt }, doi = { 10.57967/hf/1417 }, publisher = { Hugging Face } } ``` # License For both the Maria NMT model and the original [Helsinki NLP](https://twitter.com/HelsinkiNLP) [Opus MT model](https://huggingface.co/Helsinki-NLP) we did **not** find a license, if this was in error please let us know and we will add the appropriate licensing promptly. We adopt the licensing of the MultiNLI corpus, which is stated as follows in the accompanying publication: *The majority of the corpus is released under the OANC’s license, which allows all content to be freely used, modified, and shared under permissive terms. 
The data in the FICTION section falls under several permissive licenses; Seven Swords is available under a Creative Commons Share-Alike 3.0 Unported License, and with the explicit permission of the author, Living History and Password Incorrect are available under Creative Commons Attribution 3.0 Unported Licenses; the remaining works of fiction are in the public domain in the United States (but may be licensed differently elsewhere).*
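For orientation, a minimal loading sketch follows. The split and feature names are taken from this dataset's configuration, and the label inventory (entailment / neutral / contradiction) is the standard MultiNLI one; direct loading via `datasets.load_dataset` is assumed to work for this repo.

```python
# Hedged sketch: load the translated corpus and inspect one NLI pair.
# Splits: train / validation / test; fields include sentence1, sentence2, gold_label.
from datasets import load_dataset

ds = load_dataset("UMCU/MultiNLI_Dutch_translated_with_Marianmt")

example = ds["train"][0]
print(example["sentence1"])   # Dutch premise
print(example["sentence2"])   # Dutch hypothesis
print(example["gold_label"])  # entailment / neutral / contradiction
print({split: ds[split].num_rows for split in ("train", "validation", "test")})
```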
UMCU/MultiNLI_Dutch_translated_with_Marianmt
[ "task_categories:sentence-similarity", "size_categories:100K<n<1M", "language:nl", "license:creativeml-openrail-m", "arxiv:1910.13461", "doi:10.57967/hf/1417", "region:us" ]
2023-11-26T22:24:32+00:00
{"language": ["nl"], "license": "creativeml-openrail-m", "size_categories": ["100K<n<1M"], "task_categories": ["sentence-similarity"], "pretty_name": "Dutch MultiNLI(MM) using MariaNMT", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "annotator_labels", "sequence": "string"}, {"name": "genre", "dtype": "string"}, {"name": "gold_label", "dtype": "string"}, {"name": "pairID", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 92877120, "num_examples": 380800}, {"name": "validation", "num_bytes": 5903876, "num_examples": 19392}, {"name": "test", "num_bytes": 5321727, "num_examples": 19040}], "download_size": 58511174, "dataset_size": 104102723}}
2023-12-01T19:21:37+00:00
[ "1910.13461" ]
[ "nl" ]
TAGS #task_categories-sentence-similarity #size_categories-100K<n<1M #language-Dutch #license-creativeml-openrail-m #arxiv-1910.13461 #doi-10.57967/hf/1417 #region-us
# Dataset Card for "MultiNLI_Dutch_translated_with_Marianmt" Translation of the English corpus Multi-Genre Natural Language Inference (MultiNLI) corpus , specifically the *mismatched version*, to Dutch using an Maria NMT model, trained by Helsinki NLP. Note, for reference: Maria NMT is based on BART, described here. A complete description of the dataset is given here, and is available on HuggingFace. # Attribution If you use this dataset please use the following to credit the creators of MultiNLI: The creators of the OPUS-MT models: and # License For both the Maria NMT model and the original Helsinki NLP Opus MT model we did not find a license, if this was in error please let us know and we will add the appropriate licensing promptly. We adopt the licensing of the MultiNLI corpus, which is stated as follows in the accompanying publication: *The majority of the corpus is released under the OANC’s license, which allows all content to be freely used, modified, and shared under permissive terms. The data in the FICTION section falls under several permissive licenses; Seven Swords is available under a Creative Commons Share-Alike 3.0 Unported License, and with the explicit permission of the author, Living History and Password Incorrect are available under Creative Commons Attribution 3.0 Unported Licenses; the remaining works of fiction are in the public domain in the United States (but may be licensed differently elsewhere).*
[ "# Dataset Card for \"MultiNLI_Dutch_translated_with_Marianmt\"\n\nTranslation of the English corpus Multi-Genre Natural Language Inference (MultiNLI) corpus , \nspecifically the *mismatched version*, to Dutch using an Maria NMT model, \ntrained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.\n\nA complete description of the dataset is given here, \nand is available on HuggingFace.", "# Attribution\n\nIf you use this dataset please use the following to credit the creators of MultiNLI:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license, if this was in error please let us know and we will add the appropriate licensing promptly.\n\nWe adopt the licensing of the MultiNLI corpus, which is stated as follows in the accompanying publication:\n*The majority of the\ncorpus is released under the OANC’s license,\nwhich allows all content to be freely used, modified, and shared under permissive terms. The data\nin the FICTION section falls under several permissive licenses; Seven Swords is available under\na Creative Commons Share-Alike 3.0 Unported\nLicense, and with the explicit permission of the\nauthor, Living History and Password Incorrect are\navailable under Creative Commons Attribution\n3.0 Unported Licenses; the remaining works of\nfiction are in the public domain in the United\nStates (but may be licensed differently elsewhere).*" ]
[ "TAGS\n#task_categories-sentence-similarity #size_categories-100K<n<1M #language-Dutch #license-creativeml-openrail-m #arxiv-1910.13461 #doi-10.57967/hf/1417 #region-us \n", "# Dataset Card for \"MultiNLI_Dutch_translated_with_Marianmt\"\n\nTranslation of the English corpus Multi-Genre Natural Language Inference (MultiNLI) corpus , \nspecifically the *mismatched version*, to Dutch using an Maria NMT model, \ntrained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.\n\nA complete description of the dataset is given here, \nand is available on HuggingFace.", "# Attribution\n\nIf you use this dataset please use the following to credit the creators of MultiNLI:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license, if this was in error please let us know and we will add the appropriate licensing promptly.\n\nWe adopt the licensing of the MultiNLI corpus, which is stated as follows in the accompanying publication:\n*The majority of the\ncorpus is released under the OANC’s license,\nwhich allows all content to be freely used, modified, and shared under permissive terms. The data\nin the FICTION section falls under several permissive licenses; Seven Swords is available under\na Creative Commons Share-Alike 3.0 Unported\nLicense, and with the explicit permission of the\nauthor, Living History and Password Incorrect are\navailable under Creative Commons Attribution\n3.0 Unported Licenses; the remaining works of\nfiction are in the public domain in the United\nStates (but may be licensed differently elsewhere).*" ]
[ 70, 111, 34, 196 ]
[ "passage: TAGS\n#task_categories-sentence-similarity #size_categories-100K<n<1M #language-Dutch #license-creativeml-openrail-m #arxiv-1910.13461 #doi-10.57967/hf/1417 #region-us \n# Dataset Card for \"MultiNLI_Dutch_translated_with_Marianmt\"\n\nTranslation of the English corpus Multi-Genre Natural Language Inference (MultiNLI) corpus , \nspecifically the *mismatched version*, to Dutch using an Maria NMT model, \ntrained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.\n\nA complete description of the dataset is given here, \nand is available on HuggingFace.# Attribution\n\nIf you use this dataset please use the following to credit the creators of MultiNLI:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license, if this was in error please let us know and we will add the appropriate licensing promptly.\n\nWe adopt the licensing of the MultiNLI corpus, which is stated as follows in the accompanying publication:\n*The majority of the\ncorpus is released under the OANC’s license,\nwhich allows all content to be freely used, modified, and shared under permissive terms. The data\nin the FICTION section falls under several permissive licenses; Seven Swords is available under\na Creative Commons Share-Alike 3.0 Unported\nLicense, and with the explicit permission of the\nauthor, Living History and Password Incorrect are\navailable under Creative Commons Attribution\n3.0 Unported Licenses; the remaining works of\nfiction are in the public domain in the United\nStates (but may be licensed differently elsewhere).*" ]
3caf796a6fb6cb69efb2b630d36330bda0fd1b48
# Bangumi Image Base of Vivy - Fluorite Eye's Song This is the image base of bangumi Vivy - Fluorite Eye's Song, we detected 41 characters, 2718 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 16 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 109 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 16 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 987 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 34 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 42 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 32 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 35 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 41 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 
6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 30 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 20 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 57 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 43 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 19 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 36 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 99 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 29 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 21 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 48 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 62 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 41 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 
3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 53 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 28 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 32 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 29 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 81 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 91 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 48 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 24 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 24 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 12 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 22 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 7 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | N/A | | 33 | 10 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 27 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 40 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 33 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 33 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 8 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 5 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | N/A | N/A | N/A | | noise | 294 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
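For anyone who wants just one of the per-character archives rather than the full dataset, a small download sketch is shown below. It is illustrative only: it assumes the per-character zips live at paths like `3/dataset.zip` inside this dataset repository (as the Download links in the table suggest) and has not been run against the repo.

```python
from zipfile import ZipFile

from huggingface_hub import hf_hub_download

# Illustrative: fetch the archive for character cluster 3 and unpack it locally.
# The filename follows the "<cluster>/dataset.zip" pattern implied by the table above.
path = hf_hub_download(
    repo_id="BangumiBase/vivyfluoriteeyessong",
    filename="3/dataset.zip",
    repo_type="dataset",
)
with ZipFile(path) as archive:
    archive.extractall("vivy_character_3")  # hypothetical output directory
```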
BangumiBase/vivyfluoriteeyessong
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-26T22:30:04+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-27T00:32:35+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Vivy - Fluorite Eye's Song ================================================ This is the image base of bangumi Vivy - Fluorite Eye's Song, in which we detected 41 characters and 2718 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
dece01f323c9c7b510e78ad2d3a813e6a7df784b
a
xshubhamx/places-5
[ "region:us" ]
2023-11-26T22:39:10+00:00
{}
2023-11-26T23:29:05+00:00
[]
[]
TAGS #region-us
a
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
478cd878f1dfd2ffc1b98e023cd5dc8678242c78
Samples from the LFW dataset. Samples where there is more than one face image per user were selected. They were then partitioned into two directories: ingestion and recovery. This was done to test a facial recognition system.
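A rough sketch of the described partitioning is given below. Directory names and layout are assumptions (an LFW-style one-folder-per-person source tree), not the actual structure of this repository.

```python
import shutil
from pathlib import Path

# Hypothetical layout: lfw/<person_name>/<image>.jpg
source = Path("lfw")
ingestion = Path("ingestion")   # images used to enroll each identity
recovery = Path("recovery")     # images used to test recognition afterwards

for person_dir in sorted(p for p in source.iterdir() if p.is_dir()):
    images = sorted(person_dir.glob("*.jpg"))
    if len(images) < 2:
        continue  # keep only people with more than one image
    for target, files in ((ingestion, images[:1]), (recovery, images[1:])):
        dest = target / person_dir.name
        dest.mkdir(parents=True, exist_ok=True)
        for image in files:
            shutil.copy2(image, dest / image.name)
```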
vilsonrodrigues/lfw
[ "license:apache-2.0", "region:us" ]
2023-11-26T23:49:09+00:00
{"license": "apache-2.0"}
2023-11-27T00:53:06+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
Samples from the LFW dataset. Samples where there is more than one face image per user were selected. They were then partitioned into two directories: ingestion and recovery. This was done to test a facial recognition system.
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
d62809e3f5ad8e21c2473c997364815993cae97a
This is a dataset of conversational text generated with GPT-3.5-Turbo from the Japanese Wikipedia dataset (izumi-lab/wikipedia-ja-20230720). GPT-3.5-Turbo was used for the generation.
shi3z/Japanese_wikipedia_conversation_100K
[ "license:mit", "region:us" ]
2023-11-27T00:11:17+00:00
{"license": "mit"}
2023-11-27T00:12:50+00:00
[]
[]
TAGS #license-mit #region-us
This is a dataset of conversational text generated with GPT-3.5-Turbo from the Japanese Wikipedia dataset (izumi-lab/wikipedia-ja-20230720). GPT-3.5-Turbo was used for the generation.
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
fdff1caa072665842e610dfb968b2b19003ce62b
# Dataset Card for "autotrain-data-anm2-25mh-u5jo" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
healthcorum/autotrain-data-anm2-25mh-u5jo
[ "region:us" ]
2023-11-27T00:29:51+00:00
{"dataset_info": {"features": [{"name": "target", "dtype": "string"}, {"name": "autotrain_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36008183, "num_examples": 9998}, {"name": "validation", "num_bytes": 36008183, "num_examples": 9998}], "download_size": 11959104, "dataset_size": 72016366}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-11-27T00:29:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "autotrain-data-anm2-25mh-u5jo" More Information needed
[ "# Dataset Card for \"autotrain-data-anm2-25mh-u5jo\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"autotrain-data-anm2-25mh-u5jo\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"autotrain-data-anm2-25mh-u5jo\"\n\nMore Information needed" ]
d55d2946100e0a50d8927d788634c94e14622e66
# Dataset Card for "emb-hotpotqa-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-hotpotqa-train
[ "region:us" ]
2023-11-27T01:04:23+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "neg", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 76992682, "num_examples": 68659}], "download_size": 50772036, "dataset_size": 76992682}}
2023-11-27T01:04:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-hotpotqa-train" More Information needed
[ "# Dataset Card for \"emb-hotpotqa-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-hotpotqa-train\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-hotpotqa-train\"\n\nMore Information needed" ]
8d37eb0b74c3494a6ed94f28ce6403bbb3bb1ba0
# Dataset Card for "reverse_squad" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
edsealing/reverse_squad
[ "region:us" ]
2023-11-27T01:29:35+00:00
{"dataset_info": {"features": [{"name": "numqas", "dtype": "int64"}, {"name": "context", "dtype": "string"}, {"name": "qas", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28879823, "num_examples": 18896}], "download_size": 15715321, "dataset_size": 28879823}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-27T01:29:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "reverse_squad" More Information needed
[ "# Dataset Card for \"reverse_squad\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"reverse_squad\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"reverse_squad\"\n\nMore Information needed" ]
9277ebcac47a2ea35bd88d805f4306ff703e08a6
# Dataset Card for "emb-pubmed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-pubmed
[ "region:us" ]
2023-11-27T01:43:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 321131040, "num_examples": 212269}], "download_size": 181748325, "dataset_size": 321131040}}
2023-11-27T01:43:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-pubmed" More Information needed
[ "# Dataset Card for \"emb-pubmed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-pubmed\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-pubmed\"\n\nMore Information needed" ]
9710b3dca6a58a3823bb9f25b660a8b4dff99d66
# Dataset Card for "emb-medmcqa-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-medmcqa-train
[ "region:us" ]
2023-11-27T01:52:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 102292372, "num_examples": 160869}], "download_size": 63892842, "dataset_size": 102292372}}
2023-11-27T01:52:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-medmcqa-train" More Information needed
[ "# Dataset Card for \"emb-medmcqa-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-medmcqa-train\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-medmcqa-train\"\n\nMore Information needed" ]
e92819bfad8706c5f4fdd65a0e2c19a578dcef6f
# Dataset Card for "emb-record_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-record_train
[ "region:us" ]
2023-11-27T01:56:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78813006, "num_examples": 65709}], "download_size": 48370504, "dataset_size": 78813006}}
2023-11-27T01:56:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-record_train" More Information needed
[ "# Dataset Card for \"emb-record_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-record_train\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-record_train\"\n\nMore Information needed" ]
735ca417ec8275d378a0de045650041169650b16
# Dataset Card for "mimic_tokenized_dataset_balanced" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
passionMan/mimic_tokenized_dataset_balanced
[ "region:us" ]
2023-11-27T02:08:52+00:00
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1283535100, "num_examples": 431743}, {"name": "test", "num_bytes": 427844521, "num_examples": 143915}], "download_size": 231786541, "dataset_size": 1711379621}}
2023-11-27T02:09:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mimic_tokenized_dataset_balanced" More Information needed
[ "# Dataset Card for \"mimic_tokenized_dataset_balanced\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mimic_tokenized_dataset_balanced\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mimic_tokenized_dataset_balanced\"\n\nMore Information needed" ]
042e23609985fac8e962eeeb729b5c044721aa55
# Dataset Card for "emb-scitldr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-scitldr
[ "region:us" ]
2023-11-27T02:12:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 59114455, "num_examples": 1992}], "download_size": 29584964, "dataset_size": 59114455}}
2023-11-27T02:12:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-scitldr" More Information needed
[ "# Dataset Card for \"emb-scitldr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-scitldr\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-scitldr\"\n\nMore Information needed" ]
7dd15604cd49602b4ed66c8769204fb50b9c3a5c
# Dataset Card for "emb-trex-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-trex-train
[ "region:us" ]
2023-11-27T02:24:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "neg", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2175572925, "num_examples": 2284168}], "download_size": 1321673983, "dataset_size": 2175572925}}
2023-11-27T02:26:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-trex-train" More Information needed
[ "# Dataset Card for \"emb-trex-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-trex-train\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-trex-train\"\n\nMore Information needed" ]
6a4b3e9a9ae5593a819ee29b09beeb835e8541b3
# Dataset Card for "emb-triviaqa-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-triviaqa-train
[ "region:us" ]
2023-11-27T02:28:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "neg", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 59025209, "num_examples": 52886}], "download_size": 39225639, "dataset_size": 59025209}}
2023-11-27T02:28:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-triviaqa-train" More Information needed
[ "# Dataset Card for \"emb-triviaqa-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-triviaqa-train\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-triviaqa-train\"\n\nMore Information needed" ]
0161388f93bfbbc56d22312f67a4cfeced53e9af
# Dataset Card for "emb-wow-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-wow-train
[ "region:us" ]
2023-11-27T02:31:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "neg", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 112157723, "num_examples": 80035}], "download_size": 65616642, "dataset_size": 112157723}}
2023-11-27T02:31:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-wow-train" More Information needed
[ "# Dataset Card for \"emb-wow-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-wow-train\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-wow-train\"\n\nMore Information needed" ]
d79536afafe5c75a533ca0a0208667cf8835a82a
# Dataset Card for "emb-zeroshot-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-zeroshot-train
[ "region:us" ]
2023-11-27T02:31:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "neg", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 131176509, "num_examples": 132063}], "download_size": 75790546, "dataset_size": 131176509}}
2023-11-27T02:31:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-zeroshot-train" More Information needed
[ "# Dataset Card for \"emb-zeroshot-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-zeroshot-train\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-zeroshot-train\"\n\nMore Information needed" ]
255ec54b4e4ffe8300b27fe6c4c9ec8b6dad0a2a
# Mosaic format for instructions dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at https://github.com/malaysia-ai/dedup-text-dataset/blob/main/pretrain-llm/combine-instructions.ipynb 2. using tokenizer https://huggingface.co/malaysia-ai/bpe-tokenizer 3. 4096 context length. ## how-to 1. git clone, ```bash git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-instructions ``` 2. load it, ```python from streaming import LocalDataset import numpy as np from streaming.base.format.mds.encodings import Encoding, _encodings class UInt16(Encoding): def encode(self, obj) -> bytes: return obj.tobytes() def decode(self, data: bytes): return np.frombuffer(data, np.uint16) _encodings['uint16'] = UInt16 dataset = LocalDataset('mosaic-instructions') len(dataset) ```
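Because the shards hold token IDs rather than raw text, a decoded preview is a convenient sanity check. The snippet below is a sketch only: it assumes each sample exposes its tokens under an `input_ids` key and that the malaysia-ai/bpe-tokenizer repository loads through `AutoTokenizer`; adjust both if the shard schema differs.

```python
from streaming import LocalDataset
from transformers import AutoTokenizer

# Run the UInt16 encoding registration from the snippet above first,
# otherwise LocalDataset cannot decode the uint16 shards.
tokenizer = AutoTokenizer.from_pretrained("malaysia-ai/bpe-tokenizer")  # assumed loadable via AutoTokenizer
dataset = LocalDataset("mosaic-instructions")

sample = dataset[0]
token_ids = list(sample["input_ids"][:128])  # "input_ids" is an assumed key name
print(tokenizer.decode(token_ids))           # preview the first 128 tokens as text
```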
malaysia-ai/mosaic-instructions
[ "language:ms", "region:us" ]
2023-11-27T03:05:24+00:00
{"language": ["ms"]}
2023-11-28T08:19:27+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for instructions dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for instructions dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for instructions dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 46, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for instructions dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
a04976789be99481a29dd26baba421467d9517e0
# Mosaic format for extra dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at https://github.com/malaysia-ai/dedup-text-dataset/blob/main/pretrain-llm/combine-extra.ipynb 2. using tokenizer https://huggingface.co/malaysia-ai/bpe-tokenizer 3. 4096 context length. ## how-to 1. git clone, ```bash git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-extra ``` 2. load it, ```python from streaming import LocalDataset import numpy as np from streaming.base.format.mds.encodings import Encoding, _encodings class UInt16(Encoding): def encode(self, obj) -> bytes: return obj.tobytes() def decode(self, data: bytes): return np.frombuffer(data, np.uint16) _encodings['uint16'] = UInt16 dataset = LocalDataset('mosaic-extra') len(dataset) ```
malaysia-ai/mosaic-extra
[ "language:ms", "region:us" ]
2023-11-27T03:08:53+00:00
{"language": ["ms"]}
2023-11-28T08:13:23+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for extra dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 46, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
a24622fdfe9effd333bdb2950b6d0c9f2bf4e95b
# Mosaic format for madlad-400-ms dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at https://github.com/malaysia-ai/dedup-text-dataset/blob/main/pretrain-llm/combine-madlad-400-ms.ipynb 2. using tokenizer https://huggingface.co/malaysia-ai/bpe-tokenizer 3. 4096 context length. ## how-to 1. git clone, ```bash git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-madlad-400-ms ``` 2. load it, ```python from streaming import LocalDataset import numpy as np from streaming.base.format.mds.encodings import Encoding, _encodings class UInt16(Encoding): def encode(self, obj) -> bytes: return obj.tobytes() def decode(self, data: bytes): return np.frombuffer(data, np.uint16) _encodings['uint16'] = UInt16 dataset = LocalDataset('mosaic-madlad-400-ms') len(dataset) ```
malaysia-ai/mosaic-madlad-400-ms
[ "language:ms", "region:us" ]
2023-11-27T03:14:00+00:00
{"language": ["ms"]}
2023-11-28T08:17:38+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for madlad-400-ms dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 46, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for extra dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
edfa72fb434a47de964572f839bcbfffeb8dea98
# Mosaic format for dedup text dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at https://github.com/malaysia-ai/dedup-text-dataset/blob/main/pretrain-llm/combine-dedup-text-dataset-4096.ipynb 2. using tokenizer https://huggingface.co/malaysia-ai/bpe-tokenizer 3. 4096 context length. ## how-to 1. git clone, ```bash git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-dedup-text-dataset ``` 2. load it, ```python from streaming import LocalDataset import numpy as np from streaming.base.format.mds.encodings import Encoding, _encodings class UInt16(Encoding): def encode(self, obj) -> bytes: return obj.tobytes() def decode(self, data: bytes): return np.frombuffer(data, np.uint16) _encodings['uint16'] = UInt16 dataset = LocalDataset('mosaic-dedup-text-dataset') len(dataset) ```
malaysia-ai/mosaic-dedup-text-dataset
[ "language:ms", "region:us" ]
2023-11-27T03:14:16+00:00
{"language": ["ms"]}
2023-12-01T02:16:36+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for dedup text dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for dedup text dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for dedup text dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 49, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for dedup text dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
1168c5eb007ec4dccc01e26641b690b15749963f
# Dataset Card for "omcs_dataset_full_with_embeds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dutta18/omcs_dataset_full_with_embeds
[ "region:us" ]
2023-11-27T03:31:24+00:00
{"dataset_info": {"features": [{"name": "fact", "dtype": "string"}, {"name": "count", "dtype": "int64"}, {"name": "embeddings", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 4951309139, "num_examples": 1578238}], "download_size": 5895178326, "dataset_size": 4951309139}}
2023-11-27T03:37:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "omcs_dataset_full_with_embeds" More Information needed
[ "# Dataset Card for \"omcs_dataset_full_with_embeds\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"omcs_dataset_full_with_embeds\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"omcs_dataset_full_with_embeds\"\n\nMore Information needed" ]
88590db770d73c6c26f918d701dba4bbd73f265b
# Dataset Card for "emb-reddit-title-body" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
multi-train/emb-reddit-title-body
[ "region:us" ]
2023-11-27T03:33:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "task_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 95637449375, "num_examples": 127445911}], "download_size": 3302152777, "dataset_size": 95637449375}}
2023-11-27T06:18:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emb-reddit-title-body" More Information needed
[ "# Dataset Card for \"emb-reddit-title-body\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emb-reddit-title-body\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emb-reddit-title-body\"\n\nMore Information needed" ]
6957d8fc57407c3de086df068ef11b40cc692a75
# Mosaic format for filtered starcoder dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at https://github.com/malaysia-ai/dedup-text-dataset/blob/main/pretrain-llm/combine-starcoder.ipynb 2. using tokenizer https://huggingface.co/malaysia-ai/bpe-tokenizer 3. 4096 context length. ## how-to 1. git clone, ```bash git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-starcoder-filtered ``` 2. load it, ```python from streaming import LocalDataset import numpy as np from streaming.base.format.mds.encodings import Encoding, _encodings class UInt16(Encoding): def encode(self, obj) -> bytes: return obj.tobytes() def decode(self, data: bytes): return np.frombuffer(data, np.uint16) _encodings['uint16'] = UInt16 dataset = LocalDataset('mosaic-starcoder-filtered') len(dataset) ```
malaysia-ai/mosaic-starcoder-filtered
[ "language:ms", "region:us" ]
2023-11-27T03:47:08+00:00
{"language": ["ms"]}
2023-11-28T08:14:20+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for filtered starcoder dataset to train Malaysian LLM This repository is to store dataset shards using mosaic format. 1. prepared at URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for filtered starcoder dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for filtered starcoder dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 50, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for filtered starcoder dataset to train Malaysian LLM\n\nThis repository is to store dataset shards using mosaic format.\n\n1. prepared at URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
b0d44653a7e382014f44616608ec1ee57c74bc86
# Dataset Card for "mimic_tokenized_dataset_balanced_frac_0.1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
passionMan/mimic_tokenized_dataset_balanced_frac_0.1
[ "region:us" ]
2023-11-27T04:05:42+00:00
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 195080305, "num_examples": 64761}, {"name": "test", "num_bytes": 65034069, "num_examples": 21588}], "download_size": 36831082, "dataset_size": 260114374}}
2023-11-27T08:33:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mimic_tokenized_dataset_balanced_frac_0.1" More Information needed
[ "# Dataset Card for \"mimic_tokenized_dataset_balanced_frac_0.1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mimic_tokenized_dataset_balanced_frac_0.1\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mimic_tokenized_dataset_balanced_frac_0.1\"\n\nMore Information needed" ]
f65ae6f9994d62c036c52ecb969a807ad1c43c0c
# Dataset Card for "gun-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
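Given the detection-style schema declared in the dataset_info below (an image plus an objects struct with bounding boxes and category ids), a minimal loading sketch might look like the following; it is untested and based only on that declared schema.

```python
from datasets import load_dataset

ds = load_dataset("JoseArmando07/gun-dataset", split="train")
example = ds[0]

print(example["width"], example["height"])  # image dimensions
print(example["objects"]["bbox"][:3])       # first few bounding boxes
print(example["objects"]["category"])       # per-box category ids
```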
JoseArmando07/gun-dataset
[ "region:us" ]
2023-11-27T06:00:42+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "objects", "struct": [{"name": "name", "dtype": "string"}, {"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "category", "sequence": "int64"}, {"name": "area", "sequence": "int64"}, {"name": "id", "sequence": "int64"}]}, {"name": "image_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2251055094.77, "num_examples": 9990}, {"name": "validation", "num_bytes": 74070838.0, "num_examples": 366}, {"name": "test", "num_bytes": 158343390.801, "num_examples": 1489}], "download_size": 243878271, "dataset_size": 2483469323.571}}
2023-12-06T07:21:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gun-dataset" More Information needed
[ "# Dataset Card for \"gun-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gun-dataset\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gun-dataset\"\n\nMore Information needed" ]
ab2612bfadbedc07394b199e819cfb0b290eba83
# Bangumi Image Base of Kono Oto Tomare! This is the image base of bangumi Kono Oto Tomare!, we detected 34 characters, 3706 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 489 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 61 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 270 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 572 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 146 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 134 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 154 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 45 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 56 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 42 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 26 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 166 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 75 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 43 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 12 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 18 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 50 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 78 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 188 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 24 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 13 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 162 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 29 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 565 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 33 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 11 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 10 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 9 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 14 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 17 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 9 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 40 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 9 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | noise | 136 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/konoototomare
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-27T06:04:00+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-27T08:10:57+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Kono Oto Tomare! ====================================== This is the image base of bangumi Kono Oto Tomare!, in which we detected 34 characters and 3706 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
df6bec9357949c25f07d630ae1394ab6fd0249af
# Dataset Card for Video Infilling and Prediction (VIP) Video Infilling and Prediction (VIP) is a benchmark dataset for assessing the sequential commonsense reasoning capabilities of vision-language models by generating explanations of videos. [See our EMNLP 2023 paper introducing this work](https://aclanthology.org/2023.emnlp-main.15/) ## Dataset Details - **Curated by:** Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, William Wang (UC Santa Barbara) - **Funded by:** Amazon AWS AI/ML Research Award, AWS Cloud Credit for Research, NSF REU #2048122 - **Language(s) (NLP):** English ### Dataset Description - VIP is an inference-time dataset that contains over 1.5k video keyframes and two forms of textual descriptions for each keyframe: an unstructured dense caption and a structured description clearly defining the Focus, Action, Mood, Objects, and Setting (FAMOuS) of each keyframe. ### Dataset Source and Creation - We use the Youtube-8M dataset to collect videos and follow a pipelined approach to extract keyframes and descriptions for the VIP dataset - Each description is verified by human annotation ## Uses/Tasks - We define two new tasks: Video Infilling and Video Prediction. - Video Infilling: Given 1, 2, or 3 surrounding keyframes, predict the keyframes in between. - Video Prediction: Given 1, 2, or 3 previous keyframes, predict the keyframes that come after. - Both of these tasks can be accomplished by using only the keyframe image, only the keyframe descriptions, or using both the descriptions and the images, allowing benchmarking on various VL models. For more information on the tasks or the dataset collection process, please refer to our paper: https://arxiv.org/pdf/2305.13903.pdf If you find this dataset helpful for your work, please cite using this citation: ``` @inproceedings{ himakunthala2023lets, title={Let's Think Frame by Frame with {VIP}: A Video Infilling and Prediction Dataset for Evaluating Video Chain-of-Thought}, author={Vaishnavi Himakunthala and Andy Ouyang and Daniel Philip Rose and Ryan He and Alex Mei and Yujie Lu and Chinmay Sonar and Michael Saxon and William Yang Wang}, booktitle={The 2023 Conference on Empirical Methods in Natural Language Processing}, year={2023}, url={https://openreview.net/forum?id=y6Ej5BZkrR} } ```
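As a schematic of how the two task setups pair context keyframes with a target, the helper below builds (context, target) examples from an ordered keyframe sequence. It is purely illustrative: the keyframe container and the choice of k frames on each side for infilling are interpretations, not the actual VIP data layout.

```python
from typing import List, Sequence, Tuple

def prediction_examples(keyframes: Sequence[str], k: int) -> List[Tuple[Sequence[str], str]]:
    """Video Prediction: use the k previous keyframes to predict the next one."""
    return [(keyframes[i - k:i], keyframes[i]) for i in range(k, len(keyframes))]

def infilling_examples(keyframes: Sequence[str], k: int) -> List[Tuple[List[str], str]]:
    """Video Infilling: use k keyframes on each side to predict the one in between."""
    examples = []
    for i in range(k, len(keyframes) - k):
        context = list(keyframes[i - k:i]) + list(keyframes[i + 1:i + 1 + k])
        examples.append((context, keyframes[i]))
    return examples
```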
ryanhe/VIP
[ "task_categories:video-classification", "task_categories:image-to-text", "language:en", "license:apache-2.0", "arxiv:2305.13903", "region:us" ]
2023-11-27T06:57:24+00:00
{"language": ["en"], "license": "apache-2.0", "task_categories": ["video-classification", "image-to-text"], "pretty_name": "VIP"}
2023-12-23T01:04:07+00:00
[ "2305.13903" ]
[ "en" ]
TAGS #task_categories-video-classification #task_categories-image-to-text #language-English #license-apache-2.0 #arxiv-2305.13903 #region-us
# Dataset Card for Video Infilling and Prediction (VIP) Video Infilling and Prediction (VIP) is a benchmark dataset for assessing the sequential commonsense reasoning capabilities of vision-language models by generating explanations of videos. See our EMNLP 2023 paper introducing this work ## Dataset Details - Curated by: Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, William Wang (UC Santa Barbara) - Funded by: Amazon AWS AI/ML Research Award, AWS Cloud Credit for Research, NSF REU #2048122 - Language(s) (NLP): English ### Dataset Description - VIP is an inference-time dataset that contains over 1.5k video keyframes and two forms of textual descriptions for each keyframe: an unstructured dense caption and a structured description clearly defining the Focus, Action, Mood, Objects, and Setting (FAMOuS) of each keyframe. ### Dataset Source and Creation - We use the Youtube-8M dataset to collect videos and follow a pipelined approach to extract keyframes and descriptions for the VIP dataset - Each description is verified by human annotation ## Uses/Tasks - We define two new tasks: Video Infilling and Video Prediction. - Video Infilling: Given 1, 2, or 3 surrounding keyframes, predict the keyframes in between. - Video Prediction: Given 1, 2, or 3 previous keyframes, predict the keyframes that come after. - Both of these tasks can be accomplished by using only the keyframe image, only the keyframe descriptions, or using both the descriptions and the images, allowing benchmarking on various VL models. For more information on the tasks or the dataset collection process, please refer to our paper: URL If you find this dataset helpful for your work, please cite using this citation:
[ "# Dataset Card for Video Infilling and Prediction (VIP)\n\nVideo Infilling and Prediction (VIP) is a benchmark dataset for assessing the sequential commonsense reasoning capabilities of vision-language models by generating explanations of videos.\n\nSee our EMNLP 2023 paper introducing this work", "## Dataset Details\n\n- Curated by: Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, William Wang (UC Santa Barbara)\n- Funded by: Amazon AWS AI/ML Research Award, AWS Cloud Credit\nfor Research, NSF REU #2048122\n- Language(s) (NLP): English", "### Dataset Description\n\n- VIP is an inference-time dataset that contains over 1.5k video keyframes and two forms of textual descriptions for each keyframe: an unstructured dense caption and a structured description clearly defining the Focus, Action, Mood, Objects, and Setting (FAMOuS) of each keyframe.", "### Dataset Source and Creation\n\n - We use the Youtube-8M dataset to collect videos and follow a pipelined approach to extract keyframes and descriptions for the VIP dataset\n - Each description is verified by human annotation", "## Uses/Tasks\n\n- We define two new tasks: Video Infilling and Video Prediction.\n - Video Infilling: Given 1, 2, or 3 surrounding keyframes, predict the keyframes in between.\n - Video Prediction: Given 1, 2, or 3 previous keyframes, predict the keyframes that come after.\n- Both of these tasks can be accomplished by using only the keyframe image, only the keyframe descriptions, or using both the descriptions and the images, allowing benchmarking on various VL models.\n\n\nFor more information on the tasks or the dataset collection process, please refer to our paper:\nURL\n\nIf you find this dataset helpful for your work, please cite using this citation:" ]
[ "TAGS\n#task_categories-video-classification #task_categories-image-to-text #language-English #license-apache-2.0 #arxiv-2305.13903 #region-us \n", "# Dataset Card for Video Infilling and Prediction (VIP)\n\nVideo Infilling and Prediction (VIP) is a benchmark dataset for assessing the sequential commonsense reasoning capabilities of vision-language models by generating explanations of videos.\n\nSee our EMNLP 2023 paper introducing this work", "## Dataset Details\n\n- Curated by: Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, William Wang (UC Santa Barbara)\n- Funded by: Amazon AWS AI/ML Research Award, AWS Cloud Credit\nfor Research, NSF REU #2048122\n- Language(s) (NLP): English", "### Dataset Description\n\n- VIP is an inference-time dataset that contains over 1.5k video keyframes and two forms of textual descriptions for each keyframe: an unstructured dense caption and a structured description clearly defining the Focus, Action, Mood, Objects, and Setting (FAMOuS) of each keyframe.", "### Dataset Source and Creation\n\n - We use the Youtube-8M dataset to collect videos and follow a pipelined approach to extract keyframes and descriptions for the VIP dataset\n - Each description is verified by human annotation", "## Uses/Tasks\n\n- We define two new tasks: Video Infilling and Video Prediction.\n - Video Infilling: Given 1, 2, or 3 surrounding keyframes, predict the keyframes in between.\n - Video Prediction: Given 1, 2, or 3 previous keyframes, predict the keyframes that come after.\n- Both of these tasks can be accomplished by using only the keyframe image, only the keyframe descriptions, or using both the descriptions and the images, allowing benchmarking on various VL models.\n\n\nFor more information on the tasks or the dataset collection process, please refer to our paper:\nURL\n\nIf you find this dataset helpful for your work, please cite using this citation:" ]
[ 50, 68, 90, 78, 50, 164 ]
[ "passage: TAGS\n#task_categories-video-classification #task_categories-image-to-text #language-English #license-apache-2.0 #arxiv-2305.13903 #region-us \n# Dataset Card for Video Infilling and Prediction (VIP)\n\nVideo Infilling and Prediction (VIP) is a benchmark dataset for assessing the sequential commonsense reasoning capabilities of vision-language models by generating explanations of videos.\n\nSee our EMNLP 2023 paper introducing this work## Dataset Details\n\n- Curated by: Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, William Wang (UC Santa Barbara)\n- Funded by: Amazon AWS AI/ML Research Award, AWS Cloud Credit\nfor Research, NSF REU #2048122\n- Language(s) (NLP): English### Dataset Description\n\n- VIP is an inference-time dataset that contains over 1.5k video keyframes and two forms of textual descriptions for each keyframe: an unstructured dense caption and a structured description clearly defining the Focus, Action, Mood, Objects, and Setting (FAMOuS) of each keyframe.### Dataset Source and Creation\n\n - We use the Youtube-8M dataset to collect videos and follow a pipelined approach to extract keyframes and descriptions for the VIP dataset\n - Each description is verified by human annotation## Uses/Tasks\n\n- We define two new tasks: Video Infilling and Video Prediction.\n - Video Infilling: Given 1, 2, or 3 surrounding keyframes, predict the keyframes in between.\n - Video Prediction: Given 1, 2, or 3 previous keyframes, predict the keyframes that come after.\n- Both of these tasks can be accomplished by using only the keyframe image, only the keyframe descriptions, or using both the descriptions and the images, allowing benchmarking on various VL models.\n\n\nFor more information on the tasks or the dataset collection process, please refer to our paper:\nURL\n\nIf you find this dataset helpful for your work, please cite using this citation:" ]
a21afa639dfceb96fd4fc05f0ab6ab620aaff515
# Dataset Card for "openai_summarize_comparisons_tldrprompt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mnoukhov/openai_summarize_comparisons_tldrprompt
[ "region:us" ]
2023-11-27T06:57:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid1", "path": "data/valid1-*"}, {"split": "valid2", "path": "data/valid2-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 156593160, "num_examples": 92534}, {"name": "test", "num_bytes": 142265844, "num_examples": 83629}, {"name": "valid1", "num_bytes": 56388492, "num_examples": 33082}, {"name": "valid2", "num_bytes": 85940008, "num_examples": 50715}], "download_size": 61340645, "dataset_size": 441187504}}
2023-11-30T04:11:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "openai_summarize_comparisons_tldrprompt" More Information needed
[ "# Dataset Card for \"openai_summarize_comparisons_tldrprompt\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"openai_summarize_comparisons_tldrprompt\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"openai_summarize_comparisons_tldrprompt\"\n\nMore Information needed" ]
fb7136239ac7b23d38eb142d6425e88ee0a61387
# Dataset Card for "smile_style_orca" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
brian-lim/smile_style_orca
[ "region:us" ]
2023-11-27T07:00:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34830270, "num_examples": 31526}], "download_size": 2812219, "dataset_size": 34830270}}
2023-12-06T01:21:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "smile_style_orca" More Information needed
[ "# Dataset Card for \"smile_style_orca\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"smile_style_orca\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"smile_style_orca\"\n\nMore Information needed" ]
bfb3619e7fafe98526beb2d254971854c1a53520
This repo consists of the datasets used for the TaCo paper. There are four datasets: * Multilingual Alpaca-52K GPT-4 dataset * Multilingual Dolly-15K GPT-4 dataset * TaCo dataset * Multilingual Vicuna Benchmark dataset We translated the first three datasets using Google Cloud Translation. The TaCo dataset is created by using the TaCo approach as described in our paper, combining the Alpaca-52K and Dolly-15K datasets. If you would like to create the TaCo dataset for a specific language, you can simply follow the method as mentioned in the paper and use the above translated datasets. Link to the Paper: [Arxiv](https://arxiv.org/abs/2311.10797) ``` { "instruction": "instruction in xx", "input": "input in xx", "output": "Instruction in English: instruction in en , Response in English: response in en , Response in xx: response in xx " } ``` **Model Weights** We have released all of our model adapters in the HuggingFace. * [Taco Nepali-33B 🌮](https://huggingface.co/saillab/taco-nepali-33b) * [Taco Sanskrit-33B 🌮](https://huggingface.co/saillab/taco-sanskrit-33b) * [Taco Maithili-33B 🌮](https://huggingface.co/saillab/taco-maithili-33b) * [Taco Persian-33B 🌮](https://huggingface.co/saillab/taco-persian-33b) **Citation** ``` @article{upadhayay2023taco, title={TaCo: Enhancing Cross-Lingual Transfer for Low-Resource Languages in LLMs through Translation-Assisted Chain-of-Thought Processes}, author={Upadhayay, Bibek and Behzadan, Vahid}, journal={arXiv preprint arXiv:2311.10797}, year={2023} } ``` **Copyright and Intended Use** This dataset has been released under CC BY-NC, intended for academic and research purposes only. Please review the licenses and terms and conditions of Alpaca-52K, Dolly-15K, and Google Cloud Translation before using this dataset for any purpose other than research.
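**Example: Building a TaCo-Style Record**

The sketch below is only an illustration of the record schema shown above, not code from the TaCo paper; the helper name, the placeholder strings, and the `"Nepali"` language label are assumptions you should replace with your own translated data and target language.

```python
# Minimal sketch: assemble a record that follows the TaCo output template
# "Instruction in English: ... , Response in English: ... , Response in xx: ..."
def make_taco_record(instruction_xx, input_xx, instruction_en, response_en, response_xx, language="Nepali"):
    output = (
        f"Instruction in English: {instruction_en} , "
        f"Response in English: {response_en} , "
        f"Response in {language}: {response_xx} "
    )
    return {"instruction": instruction_xx, "input": input_xx, "output": output}

record = make_taco_record(
    instruction_xx="<instruction translated to the target language>",
    input_xx="<input translated to the target language, possibly empty>",
    instruction_en="<original English instruction>",
    response_en="<English response>",
    response_xx="<response translated to the target language>",
)
print(record["output"])
```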
saillab/taco-datasets
[ "size_categories:100K<n<1M", "language:en", "language:ne", "language:sn", "language:mai", "language:fa", "language:hi", "language:af", "language:sq", "language:am", "language:ar", "language:hy", "language:as", "language:ay", "language:az", "language:bm", "language:eu", "language:be", "language:bn", "language:bh", "language:bs", "language:bg", "language:ca", "language:ceb", "language:ny", "language:zh", "language:co", "language:hr", "language:cs", "language:da", "language:dv", "language:dog", "language:nl", "language:eo", "language:et", "language:ee", "language:tl", "language:fi", "language:fr", "language:fy", "language:gl", "language:ka", "language:de", "language:el", "language:gn", "language:gu", "language:ht", "language:ha", "language:haw", "language:he", "language:hmn", "language:hu", "language:is", "language:ig", "language:ilo", "language:id", "language:ga", "language:it", "language:ja", "language:jv", "language:kn", "language:kk", "language:km", "language:rw", "language:kok", "language:ko", "language:kri", "language:ku", "language:ky", "language:lo", "language:la", "language:lv", "language:ln", "language:lt", "language:lg", "language:lb", "language:mk", "language:ml", "language:mt", "language:mi", "language:mr", "language:mni", "language:ms", "language:mg", "language:my", "language:no", "language:or", "language:om", "language:ps", "language:pl", "language:pt", "language:pa", "language:ro", "language:ru", "language:sm", "language:gd", "language:sr", "language:st", "language:tn", "language:sd", "language:si", "language:sk", "language:sl", "language:so", "language:es", "language:su", "language:sw", "language:sv", "language:tg", "language:ta", "language:tt", "language:te", "language:th", "language:ti", "language:to", "language:tr", "language:tk", "language:tw", "language:uk", "language:ur", "language:ug", "language:uz", "language:vi", "language:cy", "language:xh", "language:yi", "language:yo", "language:zu", "arxiv:2311.10797", "region:us" ]
2023-11-27T07:15:33+00:00
{"language": ["en", "ne", "sn", "mai", "fa", "hi", "af", "sq", "am", "ar", "hy", "as", "ay", "az", "bm", "eu", "be", "bn", "bh", "bs", "bg", "ca", "ceb", "ny", "zh", "co", "hr", "cs", "da", "dv", "dog", "nl", "eo", "et", "ee", "tl", "fi", "fr", "fy", "gl", "ka", "de", "el", "gn", "gu", "ht", "ha", "haw", "he", "hmn", "hu", "is", "ig", "ilo", "id", "ga", "it", "ja", "jv", "kn", "kk", "km", "rw", "kok", "ko", "kri", "ku", "ky", "lo", "la", "lv", "ln", "lt", "lg", "lb", "mk", "ml", "mt", "mi", "mr", "mni", "ms", "mg", "mt", "my", "no", "or", "om", "ps", "pl", "pt", "pa", "ro", "ru", "sm", "gd", "sr", "st", "tn", "sd", "si", "sk", "sl", "so", "es", "su", "sw", "sv", "tg", "ta", "tt", "te", "th", "ti", "to", "tr", "tk", "tw", "uk", "ur", "ug", "uz", "vi", "cy", "xh", "yi", "yo", "zu"], "size_categories": ["100K<n<1M"], "pretty_name": "t"}
2023-12-01T06:21:22+00:00
[ "2311.10797" ]
[ "en", "ne", "sn", "mai", "fa", "hi", "af", "sq", "am", "ar", "hy", "as", "ay", "az", "bm", "eu", "be", "bn", "bh", "bs", "bg", "ca", "ceb", "ny", "zh", "co", "hr", "cs", "da", "dv", "dog", "nl", "eo", "et", "ee", "tl", "fi", "fr", "fy", "gl", "ka", "de", "el", "gn", "gu", "ht", "ha", "haw", "he", "hmn", "hu", "is", "ig", "ilo", "id", "ga", "it", "ja", "jv", "kn", "kk", "km", "rw", "kok", "ko", "kri", "ku", "ky", "lo", "la", "lv", "ln", "lt", "lg", "lb", "mk", "ml", "mt", "mi", "mr", "mni", "ms", "mg", "my", "no", "or", "om", "ps", "pl", "pt", "pa", "ro", "ru", "sm", "gd", "sr", "st", "tn", "sd", "si", "sk", "sl", "so", "es", "su", "sw", "sv", "tg", "ta", "tt", "te", "th", "ti", "to", "tr", "tk", "tw", "uk", "ur", "ug", "uz", "vi", "cy", "xh", "yi", "yo", "zu" ]
TAGS #size_categories-100K<n<1M #language-English #language-Nepali (macrolanguage) #language-Shona #language-Maithili #language-Persian #language-Hindi #language-Afrikaans #language-Albanian #language-Amharic #language-Arabic #language-Armenian #language-Assamese #language-Aymara #language-Azerbaijani #language-Bambara #language-Basque #language-Belarusian #language-Bengali #language-bh #language-Bosnian #language-Bulgarian #language-Catalan #language-Cebuano #language-Nyanja #language-Chinese #language-Corsican #language-Croatian #language-Czech #language-Danish #language-Dhivehi #language-dog #language-Dutch #language-Esperanto #language-Estonian #language-Ewe #language-Tagalog #language-Finnish #language-French #language-Western Frisian #language-Galician #language-Georgian #language-German #language-Modern Greek (1453-) #language-Guarani #language-Gujarati #language-Haitian #language-Hausa #language-Hawaiian #language-Hebrew #language-Hmong #language-Hungarian #language-Icelandic #language-Igbo #language-Iloko #language-Indonesian #language-Irish #language-Italian #language-Japanese #language-Javanese #language-Kannada #language-Kazakh #language-Khmer #language-Kinyarwanda #language-Konkani (macrolanguage) #language-Korean #language-Krio #language-Kurdish #language-Kirghiz #language-Lao #language-Latin #language-Latvian #language-Lingala #language-Lithuanian #language-Ganda #language-Luxembourgish #language-Macedonian #language-Malayalam #language-Maltese #language-Maori #language-Marathi #language-Manipuri #language-Malay (macrolanguage) #language-Malagasy #language-Burmese #language-Norwegian #language-Oriya (macrolanguage) #language-Oromo #language-Pushto #language-Polish #language-Portuguese #language-Panjabi #language-Romanian #language-Russian #language-Samoan #language-Scottish Gaelic #language-Serbian #language-Southern Sotho #language-Tswana #language-Sindhi #language-Sinhala #language-Slovak #language-Slovenian #language-Somali #language-Spanish #language-Sundanese #language-Swahili (macrolanguage) #language-Swedish #language-Tajik #language-Tamil #language-Tatar #language-Telugu #language-Thai #language-Tigrinya #language-Tonga (Tonga Islands) #language-Turkish #language-Turkmen #language-Twi #language-Ukrainian #language-Urdu #language-Uighur #language-Uzbek #language-Vietnamese #language-Welsh #language-Xhosa #language-Yiddish #language-Yoruba #language-Zulu #arxiv-2311.10797 #region-us
This repo consists of the datasets used for the TaCo paper. There are four datasets: * Multilingual Alpaca-52K GPT-4 dataset * Multilingual Dolly-15K GPT-4 dataset * TaCo dataset * Multilingual Vicuna Benchmark dataset We translated the first three datasets using Google Cloud Translation. The TaCo dataset is created by using the TaCo approach as described in our paper, combining the Alpaca-52K and Dolly-15K datasets. If you would like to create the TaCo dataset for a specific language, you can simply follow the method as mentioned in the paper and use the above translated datasets. Link to the Paper: Arxiv Model Weights We have released all of our model adapters in the HuggingFace. * Taco Nepali-33B * Taco Sanskrit-33B * Taco Maithili-33B * Taco Persian-33B Citation Copyright and Intended Use This dataset has been released under CC BY-NC, intended for academic and research purposes only. Please review the licenses and terms and conditions of Alpaca-52K, Dolly-15K, and Google Cloud Translation before using this dataset for any purpose other than research.
[]
[ "TAGS\n#size_categories-100K<n<1M #language-English #language-Nepali (macrolanguage) #language-Shona #language-Maithili #language-Persian #language-Hindi #language-Afrikaans #language-Albanian #language-Amharic #language-Arabic #language-Armenian #language-Assamese #language-Aymara #language-Azerbaijani #language-Bambara #language-Basque #language-Belarusian #language-Bengali #language-bh #language-Bosnian #language-Bulgarian #language-Catalan #language-Cebuano #language-Nyanja #language-Chinese #language-Corsican #language-Croatian #language-Czech #language-Danish #language-Dhivehi #language-dog #language-Dutch #language-Esperanto #language-Estonian #language-Ewe #language-Tagalog #language-Finnish #language-French #language-Western Frisian #language-Galician #language-Georgian #language-German #language-Modern Greek (1453-) #language-Guarani #language-Gujarati #language-Haitian #language-Hausa #language-Hawaiian #language-Hebrew #language-Hmong #language-Hungarian #language-Icelandic #language-Igbo #language-Iloko #language-Indonesian #language-Irish #language-Italian #language-Japanese #language-Javanese #language-Kannada #language-Kazakh #language-Khmer #language-Kinyarwanda #language-Konkani (macrolanguage) #language-Korean #language-Krio #language-Kurdish #language-Kirghiz #language-Lao #language-Latin #language-Latvian #language-Lingala #language-Lithuanian #language-Ganda #language-Luxembourgish #language-Macedonian #language-Malayalam #language-Maltese #language-Maori #language-Marathi #language-Manipuri #language-Malay (macrolanguage) #language-Malagasy #language-Burmese #language-Norwegian #language-Oriya (macrolanguage) #language-Oromo #language-Pushto #language-Polish #language-Portuguese #language-Panjabi #language-Romanian #language-Russian #language-Samoan #language-Scottish Gaelic #language-Serbian #language-Southern Sotho #language-Tswana #language-Sindhi #language-Sinhala #language-Slovak #language-Slovenian #language-Somali #language-Spanish #language-Sundanese #language-Swahili (macrolanguage) #language-Swedish #language-Tajik #language-Tamil #language-Tatar #language-Telugu #language-Thai #language-Tigrinya #language-Tonga (Tonga Islands) #language-Turkish #language-Turkmen #language-Twi #language-Ukrainian #language-Urdu #language-Uighur #language-Uzbek #language-Vietnamese #language-Welsh #language-Xhosa #language-Yiddish #language-Yoruba #language-Zulu #arxiv-2311.10797 #region-us \n" ]
[ 765 ]
[ "passage: " ]
7cb9b53892549c58cbef6dd690090b6914693a3b
ARB data from [DuckAI](https://arb-dataset.netlify.app/). Categorized into `law, math, physics, reading, science`
imone/ARB
[ "license:mit", "region:us" ]
2023-11-27T08:01:11+00:00
{"license": "mit"}
2023-11-27T14:25:59+00:00
[]
[]
TAGS #license-mit #region-us
ARB data from DuckAI. Categorized into 'law, math, physics, reading, science'
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
42074edd69cc8521f0a73af54cece58a78a769a5
[ { "id": "1", "context": "Sørg for at din hånd er så afslappet som muligt, mens du stadig rammer alle tonerne korrekt - prøv også at undgå at lave for mange ekstra bevægelser med fingrene. På denne måde udmatter du dig selv så lidt som muligt. Husk at der ingen grund er til at ramme tangenterne hårdt for at få mere lyd ligesom på klaveret. For at få ekstra lydstyrke på harmonika bruger man blæsebælgene med højere tryk eller hastighed.", "question": "Hvad ville ifølge afsnittet være et unøjagtigt tip, når det drejer sig om at spille korrekt på en harmonika?", "answer": "For at få mere lyd, skal du trykke hårdere på tangenterne", "start_position": 108, "end_position": 129 }, { "id": "2", "context": "Danmark er et land med en rig historie og kultur. Landet er hjemsted for mange forskellige museer, der fortæller historien om Danmark og dets folk. Et af de mest populære museer i Danmark er Nationalmuseet, der ligger i København. Nationalmuseet har en samling på over 1 million genstande, der dækker alt fra forhistorisk tid til i dag.", "question": "Hvad er hovedstaden i Danmark?", "answer": "København", "start_position": 31, "end_position": 38 }, { "id": "3", "context": "Den danske madkultur er præget af en blanding af skandinaviske, tyske og franske traditioner. Nogle af de mest populære danske retter er stegt flæsk med persillesovs, frikadeller og smørrebrød.", "question": "Hvad er navnet på den traditionelle danske ret, der består af stegt flæsk, persillesovs og kartofler?", "answer": "Stegt flæsk med persillesovs", "start_position": 68, "end_position": 89 }, { "id": "4", "context": "Danmark er et land med en stærk socialdemokratisk tradition. Landet har et veludviklet socialt sikkerhedsnet, der sikrer borgerne en række rettigheder og ydelser.", "question": "Hvad er navnet på det danske parti, der er det største i Folketinget?", "answer": "Socialdemokratiet", "start_position": 80, "end_position": 98 }, { "id": "5", "context": "Danmark er et land med en befolkning på omkring 5,8 millioner mennesker. Landet er et af de mest veludviklede lande i verden og har en høj levestandard.", "question": "Hvad er den officielle religion i Danmark?", "answer": "Folkekirken", "start_position": 108, "end_position": 119 }, { "id": "6", "context": "Danmark er et land med en lang kystlinje. Landet har mange smukke strande, der er populære blandt turister.", "question": "Hvad er navnet på den danske ø, der er hjemsted for Roskilde Festival?", "answer": "Sjælland", "start_position": 65, "end_position": 77 } ]
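The records above follow a SQuAD-style layout where `start_position` and `end_position` are meant to index the answer span inside `context`. The snippet below is a hypothetical sanity check, not part of the dataset: the file name is an assumption, and it simply reports whether each stored span actually matches the stored answer.

```python
import json

# Load the examples (file name is assumed; adjust to wherever the JSON is stored).
with open("dan_test_qa.json", encoding="utf-8") as f:
    examples = json.load(f)

# Check that context[start_position:end_position] reproduces the answer text.
for ex in examples:
    span = ex["context"][ex["start_position"]:ex["end_position"]]
    print(f"id={ex['id']} match={span == ex['answer']} span={span!r} answer={ex['answer']!r}")
```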
EnKop/dan_test_QA_dataset
[ "region:us" ]
2023-11-27T08:33:51+00:00
{}
2023-11-27T10:20:43+00:00
[]
[]
TAGS #region-us
[ { "id": "1", "context": "Sørg for at din hånd er så afslappet som muligt, mens du stadig rammer alle tonerne korrekt - prøv også at undgå at lave for mange ekstra bevægelser med fingrene. På denne måde udmatter du dig selv så lidt som muligt. Husk at der ingen grund er til at ramme tangenterne hårdt for at få mere lyd ligesom på klaveret. For at få ekstra lydstyrke på harmonika bruger man blæsebælgene med højere tryk eller hastighed.", "question": "Hvad ville ifølge afsnittet være et unøjagtigt tip, når det drejer sig om at spille korrekt på en harmonika?", "answer": "For at få mere lyd, skal du trykke hårdere på tangenterne", "start_position": 108, "end_position": 129 }, { "id": "2", "context": "Danmark er et land med en rig historie og kultur. Landet er hjemsted for mange forskellige museer, der fortæller historien om Danmark og dets folk. Et af de mest populære museer i Danmark er Nationalmuseet, der ligger i København. Nationalmuseet har en samling på over 1 million genstande, der dækker alt fra forhistorisk tid til i dag.", "question": "Hvad er hovedstaden i Danmark?", "answer": "København", "start_position": 31, "end_position": 38 }, { "id": "3", "context": "Den danske madkultur er præget af en blanding af skandinaviske, tyske og franske traditioner. Nogle af de mest populære danske retter er stegt flæsk med persillesovs, frikadeller og smørrebrød.", "question": "Hvad er navnet på den traditionelle danske ret, der består af stegt flæsk, persillesovs og kartofler?", "answer": "Stegt flæsk med persillesovs", "start_position": 68, "end_position": 89 }, { "id": "4", "context": "Danmark er et land med en stærk socialdemokratisk tradition. Landet har et veludviklet socialt sikkerhedsnet, der sikrer borgerne en række rettigheder og ydelser.", "question": "Hvad er navnet på det danske parti, der er det største i Folketinget?", "answer": "Socialdemokratiet", "start_position": 80, "end_position": 98 }, { "id": "5", "context": "Danmark er et land med en befolkning på omkring 5,8 millioner mennesker. Landet er et af de mest veludviklede lande i verden og har en høj levestandard.", "question": "Hvad er den officielle religion i Danmark?", "answer": "Folkekirken", "start_position": 108, "end_position": 119 }, { "id": "6", "context": "Danmark er et land med en lang kystlinje. Landet har mange smukke strande, der er populære blandt turister.", "question": "Hvad er navnet på den danske ø, der er hjemsted for Roskilde Festival?", "answer": "Sjælland", "start_position": 65, "end_position": 77 } ]
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
4c3f1a4b43c207b3ebe0aada1daaca521930840b
# K-MMLU (Korean-MMLU)

<font color='red'>🚧 This repo contains KMMLU-v0.3-preview. The dataset is under ongoing updates. 🚧</font>

### K-MMLU Description

| Description             | Count   |
|-------------------------|---------|
| # of instance train     | 208,440 |
| # of instance dev       | 215     |
| # of instance test      | 34,700  |
| # of tests              | 525     |
| # of categories         | 43      |
| version                 | 0.3     |

*Paper & CoT Samples Coming Soon!*

The K-MMLU (Korean-MMLU) is a comprehensive suite designed to evaluate the advanced knowledge and reasoning abilities of large language models (LLMs) within the Korean language and cultural context. This suite encompasses 43 topics, primarily focusing on expert-level subjects. It includes general subjects like Physics and Ecology, Law and Political Science, and specialized fields such as Non-Destructive Testing and Maritime Engineering. The datasets are derived from Korean licensing exams, and about 90% of the questions include human accuracy figures based on the performance of human test-takers in these exams. K-MMLU is segmented into training, testing, and development subsets, with the test subset ranging from a minimum of 100 to a maximum of 1000 questions, totaling 34,732 questions. Additionally, a set of 5 questions is provided as a development set for few-shot exemplar development. In total, K-MMLU consists of 251,338 instances. For further information, see [g-sheet](https://docs.google.com/spreadsheets/d/1_6MjaHoYQ0fyzZImDh7YBpPerUV0WU9Wg2Az4MPgklw/edit?usp=sharing).

### Usage via LM-Eval-Harness

Official implementation for the evaluation is now available! You may run the evaluations yourself by:

```bash
lm_eval --model hf \
    --model_args pretrained=NousResearch/Llama-2-7b-chat-hf,dtype=float16 \
    --num_fewshot 0 \
    --batch_size 4 \
    --tasks kmmlu \
    --device cuda:0
```

To install lm-eval-harness:

```bash
git clone https://github.com/EleutherAI/lm-evaluation-harness.git
cd lm-evaluation-harness
pip install -e .
```

### Point of Contact

For any questions, contact us via the following email :)
```
[email protected]
```
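### Loading a Subject Directly

Each subject is also exposed as its own configuration with `train`, `dev`, and `test` splits, so you can inspect the data without going through lm-eval-harness. The sketch below is only an illustration: `"Accounting"` is one of the available configs, and the exact column names come from the per-subject CSV files, so check them before building your own prompts.

```python
from datasets import load_dataset

# Minimal sketch: load one K-MMLU subject and peek at a few-shot exemplar.
accounting = load_dataset("HAERAE-HUB/K-MMLU-Preview", "Accounting")
print(accounting)             # shows the train/dev/test splits
print(accounting["dev"][0])   # one of the 5 development (few-shot) questions
```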
HAERAE-HUB/K-MMLU-Preview
[ "task_categories:multiple-choice", "size_categories:10K<n<100K", "language:ko", "license:cc-by-nc-nd-4.0", "mmlu", "haerae", "region:us" ]
2023-11-27T09:06:18+00:00
{"language": ["ko"], "license": "cc-by-nc-nd-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["multiple-choice"], "configs": [{"config_name": "Accounting", "data_files": [{"split": "train", "path": "data/Accounting-train.csv"}, {"split": "dev", "path": "data/Accounting-dev.csv"}, {"split": "test", "path": "data/Accounting-test.csv"}]}, {"config_name": "Agricultural-Sciences", "data_files": [{"split": "train", "path": "data/Agricultural-Sciences-train.csv"}, {"split": "dev", "path": "data/Agricultural-Sciences-dev.csv"}, {"split": "test", "path": "data/Agricultural-Sciences-test.csv"}]}, {"config_name": "Aviation-Engineering-and-Maintenance", "data_files": [{"split": "train", "path": "data/Aviation-Engineering-and-Maintenance-train.csv"}, {"split": "dev", "path": "data/Aviation-Engineering-and-Maintenance-dev.csv"}, {"split": "test", "path": "data/Aviation-Engineering-and-Maintenance-test.csv"}]}, {"config_name": "Biology", "data_files": [{"split": "train", "path": "data/Biology-train.csv"}, {"split": "dev", "path": "data/Biology-dev.csv"}, {"split": "test", "path": "data/Biology-test.csv"}]}, {"config_name": "Chemical-Engineering", "data_files": [{"split": "train", "path": "data/Chemical-Engineering-train.csv"}, {"split": "dev", "path": "data/Chemical-Engineering-dev.csv"}, {"split": "test", "path": "data/Chemical-Engineering-test.csv"}]}, {"config_name": "Chemistry", "data_files": [{"split": "train", "path": "data/Chemistry-train.csv"}, {"split": "dev", "path": "data/Chemistry-dev.csv"}, {"split": "test", "path": "data/Chemistry-test.csv"}]}, {"config_name": "Civil-Engineering", "data_files": [{"split": "train", "path": "data/Civil-Engineering-train.csv"}, {"split": "dev", "path": "data/Civil-Engineering-dev.csv"}, {"split": "test", "path": "data/Civil-Engineering-test.csv"}]}, {"config_name": "Computer-Science", "data_files": [{"split": "train", "path": "data/Computer-Science-train.csv"}, {"split": "dev", "path": "data/Computer-Science-dev.csv"}, {"split": "test", "path": "data/Computer-Science-test.csv"}]}, {"config_name": "Construction", "data_files": [{"split": "train", "path": "data/Construction-train.csv"}, {"split": "dev", "path": "data/Construction-dev.csv"}, {"split": "test", "path": "data/Construction-test.csv"}]}, {"config_name": "Criminal-Law", "data_files": [{"split": "train", "path": "data/Criminal-Law-train.csv"}, {"split": "dev", "path": "data/Criminal-Law-dev.csv"}, {"split": "test", "path": "data/Criminal-Law-test.csv"}]}, {"config_name": "Ecology", "data_files": [{"split": "train", "path": "data/Ecology-train.csv"}, {"split": "dev", "path": "data/Ecology-dev.csv"}, {"split": "test", "path": "data/Ecology-test.csv"}]}, {"config_name": "Economics", "data_files": [{"split": "train", "path": "data/Economics-train.csv"}, {"split": "dev", "path": "data/Economics-dev.csv"}, {"split": "test", "path": "data/Economics-test.csv"}]}, {"config_name": "Education", "data_files": [{"split": "train", "path": "data/Education-train.csv"}, {"split": "dev", "path": "data/Education-dev.csv"}, {"split": "test", "path": "data/Education-test.csv"}]}, {"config_name": "Electrical-Engineering", "data_files": [{"split": "train", "path": "data/Electrical-Engineering-train.csv"}, {"split": "dev", "path": "data/Electrical-Engineering-dev.csv"}, {"split": "test", "path": "data/Electrical-Engineering-test.csv"}]}, {"config_name": "Electronics-Engineering", "data_files": [{"split": "train", "path": "data/Electronics-Engineering-train.csv"}, {"split": "dev", "path": 
"data/Electronics-Engineering-dev.csv"}, {"split": "test", "path": "data/Electronics-Engineering-test.csv"}]}, {"config_name": "Energy-Management", "data_files": [{"split": "train", "path": "data/Energy-Management-train.csv"}, {"split": "dev", "path": "data/Energy-Management-dev.csv"}, {"split": "test", "path": "data/Energy-Management-test.csv"}]}, {"config_name": "Environmental-Science", "data_files": [{"split": "train", "path": "data/Environmental-Science-train.csv"}, {"split": "dev", "path": "data/Environmental-Science-dev.csv"}, {"split": "test", "path": "data/Environmental-Science-test.csv"}]}, {"config_name": "Fashion", "data_files": [{"split": "train", "path": "data/Fashion-train.csv"}, {"split": "dev", "path": "data/Fashion-dev.csv"}, {"split": "test", "path": "data/Fashion-test.csv"}]}, {"config_name": "Food-Processing", "data_files": [{"split": "train", "path": "data/Food-Processing-train.csv"}, {"split": "dev", "path": "data/Food-Processing-dev.csv"}, {"split": "test", "path": "data/Food-Processing-test.csv"}]}, {"config_name": "Gas-Technology-and-Engineering", "data_files": [{"split": "train", "path": "data/Gas-Technology-and-Engineering-train.csv"}, {"split": "dev", "path": "data/Gas-Technology-and-Engineering-dev.csv"}, {"split": "test", "path": "data/Gas-Technology-and-Engineering-test.csv"}]}, {"config_name": "Geomatics", "data_files": [{"split": "train", "path": "data/Geomatics-train.csv"}, {"split": "dev", "path": "data/Geomatics-dev.csv"}, {"split": "test", "path": "data/Geomatics-test.csv"}]}, {"config_name": "Health", "data_files": [{"split": "train", "path": "data/Health-train.csv"}, {"split": "dev", "path": "data/Health-dev.csv"}, {"split": "test", "path": "data/Health-test.csv"}]}, {"config_name": "Industrial-Engineer", "data_files": [{"split": "train", "path": "data/Industrial-Engineer-train.csv"}, {"split": "dev", "path": "data/Industrial-Engineer-dev.csv"}, {"split": "test", "path": "data/Industrial-Engineer-test.csv"}]}, {"config_name": "Information-Technology", "data_files": [{"split": "train", "path": "data/Information-Technology-train.csv"}, {"split": "dev", "path": "data/Information-Technology-dev.csv"}, {"split": "test", "path": "data/Information-Technology-test.csv"}]}, {"config_name": "Interior-Architecture-and-Design", "data_files": [{"split": "train", "path": "data/Interior-Architecture-and-Design-train.csv"}, {"split": "dev", "path": "data/Interior-Architecture-and-Design-dev.csv"}, {"split": "test", "path": "data/Interior-Architecture-and-Design-test.csv"}]}, {"config_name": "Law", "data_files": [{"split": "train", "path": "data/Law-train.csv"}, {"split": "dev", "path": "data/Law-dev.csv"}, {"split": "test", "path": "data/Law-test.csv"}]}, {"config_name": "Machine-Design-and-Manufacturing", "data_files": [{"split": "train", "path": "data/Machine-Design-and-Manufacturing-train.csv"}, {"split": "dev", "path": "data/Machine-Design-and-Manufacturing-dev.csv"}, {"split": "test", "path": "data/Machine-Design-and-Manufacturing-test.csv"}]}, {"config_name": "Management", "data_files": [{"split": "train", "path": "data/Management-train.csv"}, {"split": "dev", "path": "data/Management-dev.csv"}, {"split": "test", "path": "data/Management-test.csv"}]}, {"config_name": "Maritime-Engineering", "data_files": [{"split": "train", "path": "data/Maritime-Engineering-train.csv"}, {"split": "dev", "path": "data/Maritime-Engineering-dev.csv"}, {"split": "test", "path": "data/Maritime-Engineering-test.csv"}]}, {"config_name": "Marketing", "data_files": [{"split": "train", 
"path": "data/Marketing-train.csv"}, {"split": "dev", "path": "data/Marketing-dev.csv"}, {"split": "test", "path": "data/Marketing-test.csv"}]}, {"config_name": "Materials-Engineering", "data_files": [{"split": "train", "path": "data/Materials-Engineering-train.csv"}, {"split": "dev", "path": "data/Materials-Engineering-dev.csv"}, {"split": "test", "path": "data/Materials-Engineering-test.csv"}]}, {"config_name": "Mechanical-Engineering", "data_files": [{"split": "train", "path": "data/Mechanical-Engineering-train.csv"}, {"split": "dev", "path": "data/Mechanical-Engineering-dev.csv"}, {"split": "test", "path": "data/Mechanical-Engineering-test.csv"}]}, {"config_name": "Nondestructive-Testing", "data_files": [{"split": "train", "path": "data/Nondestructive-Testing-train.csv"}, {"split": "dev", "path": "data/Nondestructive-Testing-dev.csv"}, {"split": "test", "path": "data/Nondestructive-Testing-test.csv"}]}, {"config_name": "Patent", "data_files": [{"split": "train", "path": "data/Patent-train.csv"}, {"split": "dev", "path": "data/Patent-dev.csv"}, {"split": "test", "path": "data/Patent-test.csv"}]}, {"config_name": "Political-Science-and-Sociology", "data_files": [{"split": "train", "path": "data/Political-Science-and-Sociology-train.csv"}, {"split": "dev", "path": "data/Political-Science-and-Sociology-dev.csv"}, {"split": "test", "path": "data/Political-Science-and-Sociology-test.csv"}]}, {"config_name": "Psychology", "data_files": [{"split": "train", "path": "data/Psychology-train.csv"}, {"split": "dev", "path": "data/Psychology-dev.csv"}, {"split": "test", "path": "data/Psychology-test.csv"}]}, {"config_name": "Public-Safety", "data_files": [{"split": "train", "path": "data/Public-Safety-train.csv"}, {"split": "dev", "path": "data/Public-Safety-dev.csv"}, {"split": "test", "path": "data/Public-Safety-test.csv"}]}, {"config_name": "Railway-and-Automotive-Engineering", "data_files": [{"split": "train", "path": "data/Railway-and-Automotive-Engineering-train.csv"}, {"split": "dev", "path": "data/Railway-and-Automotive-Engineering-dev.csv"}, {"split": "test", "path": "data/Railway-and-Automotive-Engineering-test.csv"}]}, {"config_name": "Real-Estate", "data_files": [{"split": "train", "path": "data/Real-Estate-train.csv"}, {"split": "dev", "path": "data/Real-Estate-dev.csv"}, {"split": "test", "path": "data/Real-Estate-test.csv"}]}, {"config_name": "Refrigerating-Machinery", "data_files": [{"split": "train", "path": "data/Refrigerating-Machinery-train.csv"}, {"split": "dev", "path": "data/Refrigerating-Machinery-dev.csv"}, {"split": "test", "path": "data/Refrigerating-Machinery-test.csv"}]}, {"config_name": "Social-Welfare", "data_files": [{"split": "train", "path": "data/Social-Welfare-train.csv"}, {"split": "dev", "path": "data/Social-Welfare-dev.csv"}, {"split": "test", "path": "data/Social-Welfare-test.csv"}]}, {"config_name": "Taxation", "data_files": [{"split": "train", "path": "data/Taxation-train.csv"}, {"split": "dev", "path": "data/Taxation-dev.csv"}, {"split": "test", "path": "data/Taxation-test.csv"}]}, {"config_name": "Telecommunications-and-Wireless-Technology", "data_files": [{"split": "train", "path": "data/Telecommunications-and-Wireless-Technology-train.csv"}, {"split": "dev", "path": "data/Telecommunications-and-Wireless-Technology-dev.csv"}, {"split": "test", "path": "data/Telecommunications-and-Wireless-Technology-test.csv"}]}, {"config_name": "Korean-History", "data_files": [{"split": "train", "path": "data/korean-history-train.csv"}, {"split": "dev", "path": 
"data/korean-history-dev.csv"}, {"split": "test", "path": "data/korean-history-test.csv"}]}, {"config_name": "Math", "data_files": [{"split": "train", "path": "data/math-train.csv"}, {"split": "dev", "path": "data/math-dev.csv"}, {"split": "test", "path": "data/math-test.csv"}]}], "tags": ["mmlu", "haerae"]}
2024-01-01T09:30:22+00:00
[]
[ "ko" ]
TAGS #task_categories-multiple-choice #size_categories-10K<n<100K #language-Korean #license-cc-by-nc-nd-4.0 #mmlu #haerae #region-us
K-MMLU (Korean-MMLU) ==================== This repo contains KMMLU-v0.3-preview. The dataset is under ongoing updates. ### K-MMLU Description *Paper & CoT Samples Coming Soon!* The K-MMLU (Korean-MMLU) is a comprehensive suite designed to evaluate the advanced knowledge and reasoning abilities of large language models (LLMs) within the Korean language and cultural context. This suite encompasses 43 topics, primarily focusing on expert-level subjects. It includes general subjects like Physics and Ecology, law and political science, and specialized fields such as Non-Destructive Training and Maritime Engineering. The datasets are derived from Korean licensing exams, with about 90% of the questions including human accuracy based on the performance of human test-takers in these exams. K-MMLU is segmented into training, testing, and development subsets, with the test subset ranging from a minimum of 100 to a maximum of 1000 questions, totaling 34,732 questions. Additionally, a set of 5 questions is provided as a development set for few-shot exemplar development. In total, K-MMLU consists of 251,338 instances. For further information, see g-sheet. ### Usage via LM-Eval-Harness Official implementation for the evaluation is now available! You may run the evaluations yourself by: To install lm-eval-harness: ### Point of Contact For any questions contact us via the following email:)
[ "### K-MMLU Description\n\n\n\n*Paper & CoT Samples Coming Soon!*\n\n\nThe K-MMLU (Korean-MMLU) is a comprehensive suite designed to evaluate the advanced knowledge and reasoning abilities of large language models (LLMs)\nwithin the Korean language and cultural context. This suite encompasses 43 topics, primarily focusing on expert-level subjects.\nIt includes general subjects like Physics and Ecology, law and political science, and specialized fields such as Non-Destructive Training and Maritime Engineering.\nThe datasets are derived from Korean licensing exams, with about 90% of the questions including human accuracy based on the performance of human test-takers in these exams.\nK-MMLU is segmented into training, testing, and development subsets, with the test subset ranging from a minimum of 100 to a maximum of 1000 questions, totaling 34,732 questions.\nAdditionally, a set of 5 questions is provided as a development set for few-shot exemplar development.\nIn total, K-MMLU consists of 251,338 instances. For further information, see g-sheet.", "### Usage via LM-Eval-Harness\n\n\nOfficial implementation for the evaluation is now available! You may run the evaluations yourself by:\n\n\nTo install lm-eval-harness:", "### Point of Contact\n\n\nFor any questions contact us via the following email:)" ]
[ "TAGS\n#task_categories-multiple-choice #size_categories-10K<n<100K #language-Korean #license-cc-by-nc-nd-4.0 #mmlu #haerae #region-us \n", "### K-MMLU Description\n\n\n\n*Paper & CoT Samples Coming Soon!*\n\n\nThe K-MMLU (Korean-MMLU) is a comprehensive suite designed to evaluate the advanced knowledge and reasoning abilities of large language models (LLMs)\nwithin the Korean language and cultural context. This suite encompasses 43 topics, primarily focusing on expert-level subjects.\nIt includes general subjects like Physics and Ecology, law and political science, and specialized fields such as Non-Destructive Training and Maritime Engineering.\nThe datasets are derived from Korean licensing exams, with about 90% of the questions including human accuracy based on the performance of human test-takers in these exams.\nK-MMLU is segmented into training, testing, and development subsets, with the test subset ranging from a minimum of 100 to a maximum of 1000 questions, totaling 34,732 questions.\nAdditionally, a set of 5 questions is provided as a development set for few-shot exemplar development.\nIn total, K-MMLU consists of 251,338 instances. For further information, see g-sheet.", "### Usage via LM-Eval-Harness\n\n\nOfficial implementation for the evaluation is now available! You may run the evaluations yourself by:\n\n\nTo install lm-eval-harness:", "### Point of Contact\n\n\nFor any questions contact us via the following email:)" ]
[ 55, 255, 42, 16 ]
[ "passage: TAGS\n#task_categories-multiple-choice #size_categories-10K<n<100K #language-Korean #license-cc-by-nc-nd-4.0 #mmlu #haerae #region-us \n### K-MMLU Description\n\n\n\n*Paper & CoT Samples Coming Soon!*\n\n\nThe K-MMLU (Korean-MMLU) is a comprehensive suite designed to evaluate the advanced knowledge and reasoning abilities of large language models (LLMs)\nwithin the Korean language and cultural context. This suite encompasses 43 topics, primarily focusing on expert-level subjects.\nIt includes general subjects like Physics and Ecology, law and political science, and specialized fields such as Non-Destructive Training and Maritime Engineering.\nThe datasets are derived from Korean licensing exams, with about 90% of the questions including human accuracy based on the performance of human test-takers in these exams.\nK-MMLU is segmented into training, testing, and development subsets, with the test subset ranging from a minimum of 100 to a maximum of 1000 questions, totaling 34,732 questions.\nAdditionally, a set of 5 questions is provided as a development set for few-shot exemplar development.\nIn total, K-MMLU consists of 251,338 instances. For further information, see g-sheet.### Usage via LM-Eval-Harness\n\n\nOfficial implementation for the evaluation is now available! You may run the evaluations yourself by:\n\n\nTo install lm-eval-harness:### Point of Contact\n\n\nFor any questions contact us via the following email:)" ]
702c897c4d872337ea439a06c5c074f48ed5aaee
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
alisuper/testing-jawad
[ "region:us" ]
2023-11-27T09:12:00+00:00
{}
2023-11-27T09:13:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
0e1914fd1f9398a7260d0f558b4641964321fbbd
# Dataset Card for go_emotions_raw

This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).

## Dataset Description

- **Homepage:** https://argilla.io
- **Repository:** https://github.com/argilla-io/argilla
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

It contains the raw version of [go_emotions](https://huggingface.co/datasets/go_emotions) as a `FeedbackDataset`. Each of the original questions is defined as a single `FeedbackRecord` and contains the `responses` from each annotator. The final labels in the *simplified* version of the dataset have been used as `suggestions`, so that we can use this dataset to showcase the metrics related to the agreement between annotators as well as the `responses` vs `suggestions` metrics.

This dataset contains:

* A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla.

* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`.

* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.

### Load with Argilla

To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:

```python
import argilla as rg

ds = rg.FeedbackDataset.from_huggingface("argilla/go_emotions_raw")
```

### Load with `datasets`

To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:

```python
from datasets import load_dataset

ds = load_dataset("argilla/go_emotions_raw")
```

### Supported Tasks and Leaderboards

This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure).

There are no leaderboards associated with this dataset.

### Languages

[More Information Needed]

## Dataset Structure

### Data in Argilla

The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.

The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.

| Field Name | Title | Type | Required | Markdown |
| ---------- | ----- | ---- | -------- | -------- |
| text | Text | FieldTypes.text | True | False |


The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
| Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | label | Label | QuestionTypes.multi_label_selection | True | Classify the text by selecting the correct label from the given list of labels. | ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'] | The **suggestions** are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. | Metadata Name | Title | Type | Values | Visible for Annotators | | ------------- | ----- | ---- | ------ | ---------------------- | The **guidelines**, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": null, "fields": { "text": " \"If you don\u0027t wear BROWN AND ORANGE...YOU DON\u0027T MATTER!\" We need a tshirt with that on it asap! 
" }, "metadata": {}, "responses": [ { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000001", "values": { "label": { "value": [ "neutral" ] } } }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000016", "values": { "label": { "value": [ "anger", "annoyance", "optimism" ] } } }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000028", "values": { "label": { "value": [ "approval" ] } } }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000039", "values": { "label": { "value": [ "neutral" ] } } }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000048", "values": { "label": { "value": [ "annoyance" ] } } } ], "suggestions": [ { "agent": null, "question_name": "label", "score": null, "type": "human", "value": [ "annoyance", "neutral" ] } ], "vectors": {} } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "external_id": null, "label": [ { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000001", "value": [ "neutral" ] }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000016", "value": [ "anger", "annoyance", "optimism" ] }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000028", "value": [ "approval" ] }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000039", "value": [ "neutral" ] }, { "status": "submitted", "user_id": "00000000-0000-0000-0000-000000000048", "value": [ "annoyance" ] } ], "label-suggestion": [ "annoyance", "neutral" ], "label-suggestion-metadata": { "agent": null, "score": null, "type": "human" }, "metadata": "{}", "text": " \"If you don\u0027t wear BROWN AND ORANGE...YOU DON\u0027T MATTER!\" We need a tshirt with that on it asap! " } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **text** is of type `FieldTypes.text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **label** is of type `QuestionTypes.multi_label_selection` with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'], and description "Classify the text by selecting the correct label from the given list of labels.". * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. 
* (optional) **label-suggestion** is of type `QuestionTypes.multi_label_selection` with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral']. Additionally, we also have two more fields that are optional and are the following: * **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Script used for the generation ```python import argilla as rg from datasets import load_dataset import uuid from datasets import concatenate_datasets ds = load_dataset("go_emotions", "raw", split="train") ds_prepared = load_dataset("go_emotions") _CLASS_NAMES = [ "admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity", "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear", "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief", "remorse", "sadness", "surprise", "neutral", ] label_to_id = {label: i for i, label in enumerate(_CLASS_NAMES)} id_to_label = {i: label for i, label in enumerate(_CLASS_NAMES)} # Concatenate the datasets and transform to pd.DataFrame ds_prepared = concatenate_datasets([ds_prepared["train"], ds_prepared["validation"], ds_prepared["test"]]) df_prepared = ds_prepared.to_pandas() # Obtain the final labels as a dict, to later include these as suggestions labels_prepared = {} for idx in df_prepared.index: labels = [id_to_label[label_id] for label_id in df_prepared['labels'][idx]] labels_prepared[df_prepared['id'][idx]] = labels # Add labels to the dataset and keep only the relevant columns def add_labels(ex): labels = [] for label in _CLASS_NAMES: if ex[label] == 1: labels.append(label) ex["labels"] = labels return ex ds = ds.map(add_labels) df = ds.select_columns(["text", "labels", "rater_id", "id"]).to_pandas() # Create a FeedbackDataset for text classification feedback_dataset = rg.FeedbackDataset.for_text_classification(labels=_CLASS_NAMES, multi_label=True) # Create the records with the original responses, and use as suggestions # the final labels in the "simplified" go_emotions dataset. 
records = [] for text, df_text in df.groupby("text"): responses = [] for rater_id, df_raters in df_text.groupby("rater_id"): responses.append( { "values": {"label": {"value": df_raters["labels"].iloc[0].tolist()}}, "status": "submitted", "user_id": uuid.UUID(int=rater_id), } ) suggested_labels = labels_prepared.get(df_raters["id"].iloc[0], None) if not suggested_labels: continue suggestion = [ { "question_name": "label", "value": suggested_labels, "type": "human", } ] records.append( rg.FeedbackRecord( fields={"text": df_raters["text"].iloc[0]}, responses=responses, suggestions=suggestion ) ) feedback_dataset.add_records(records) # Push to the hub feedback_dataset.push_to_huggingface("plaguss/go_emotions_raw") ``` ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines This is a text classification dataset that contains texts and labels. Given a set of texts and a predefined set of labels, the goal of text classification is to assign one or more labels to each text based on its content. Please classify the texts by making the correct selection. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
argilla/go_emotions_raw
[ "size_categories:10K<n<100K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-27T09:36:08+00:00
{"size_categories": "10K<n<100K", "tags": ["rlfh", "argilla", "human-feedback"]}
2023-11-27T09:52:53+00:00
[]
[]
TAGS #size_categories-10K<n<100K #rlfh #argilla #human-feedback #region-us
Dataset Card for go\_emotions\_raw ================================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary It contains the raw version of go\_emotions as a 'FeedbackDataset'. Each of the original questions are defined a single 'FeedbackRecord' and contain the 'responses' from each annotator. The final labels in the *simplified* version of the dataset have been used as 'suggestions', so that we can use this dataset to showcase the metrics related to the agreement between annotators as well as the 'responses' vs 'suggestions' metrics. This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. 
For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. ### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + text is of type 'URL'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + label is of type 'QuestionTypes.multi\_label\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'], and description "Classify the text by selecting the correct label from the given list of labels.". * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) label-suggestion is of type 'QuestionTypes.multi\_label\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral']. Additionally, we also have two more fields that are optional and are the following: * metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. 
Dataset Creation ---------------- ### Script used for the generation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines This is a text classification dataset that contains texts and labels. Given a set of texts and a predefined set of labels, the goal of text classification is to assign one or more labels to each text based on its content. Please classify the texts by making the correct selection. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nIt contains the raw version of go\\_emotions as a 'FeedbackDataset'. Each of the original questions are defined a single\n'FeedbackRecord' and contain the 'responses' from each annotator. The final labels in the *simplified* version of the dataset have been used as 'suggestions', so that we\ncan use this dataset to showcase the metrics related to the agreement between annotators as well as the 'responses' vs 'suggestions' metrics.\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.multi\\_label\\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'], and description \"Classify the text by selecting the correct label from the given list of labels.\".\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.multi\\_label\\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Script used for the generation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nThis is a text classification dataset that contains texts and labels. Given a set of texts and a predefined set of labels, the goal of text classification is to assign one or more labels to each text based on its content. 
Please classify the texts by making the correct selection.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-10K<n<100K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nIt contains the raw version of go\\_emotions as a 'FeedbackDataset'. Each of the original questions are defined a single\n'FeedbackRecord' and contain the 'responses' from each annotator. The final labels in the *simplified* version of the dataset have been used as 'suggestions', so that we\ncan use this dataset to showcase the metrics related to the agreement between annotators as well as the 'responses' vs 'suggestions' metrics.\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. 
The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.multi\\_label\\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'], and description \"Classify the text by selecting the correct label from the given list of labels.\".\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.multi\\_label\\_selection' with the following allowed values ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. 
This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Script used for the generation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nThis is a text classification dataset that contains texts and labels. Given a set of texts and a predefined set of labels, the goal of text classification is to assign one or more labels to each text based on its content. Please classify the texts by making the correct selection.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 279, 40, 53, 68, 11, 404, 40, 790, 27, 7, 7, 4, 10, 10, 5, 74, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nIt contains the raw version of go\\_emotions as a 'FeedbackDataset'. Each of the original questions are defined a single\n'FeedbackRecord' and contain the 'responses' from each annotator. The final labels in the *simplified* version of the dataset have been used as 'suggestions', so that we\ncan use this dataset to showcase the metrics related to the agreement between annotators as well as the 'responses' vs 'suggestions' metrics.\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. 
The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
053fab5a1f7b96b6cb7d1c96d5cc01079da1c04e
# Notes When third-party code calls models, it inevitably has to download them from huggingface. If the network connection is poor the download can fail, and downloading manually means editing each piece of third-party code so that it reads a local model path instead. So the corresponding cache directories are stored here; simply extract them into `~/.cache/huggingface/hub/`.
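For example, fetching this repo and unpacking the archives might look roughly like the sketch below (the `*.tar.gz` glob pattern is an assumption — adjust it to the actual file names stored in this repo):

```python
# Rough sketch only: download this dataset repo and extract its archived
# model caches into the local Hugging Face hub cache. The "*.tar.gz" glob
# is an assumption about how the archives are named in this repo.
import tarfile
from pathlib import Path

from huggingface_hub import snapshot_download

local_dir = Path(snapshot_download(repo_id="Chars/symlink-models", repo_type="dataset"))
hub_cache = Path.home() / ".cache" / "huggingface" / "hub"
hub_cache.mkdir(parents=True, exist_ok=True)

for archive in local_dir.glob("*.tar.gz"):
    with tarfile.open(archive) as tar:
        # Each archive is expected to contain cached model folders
        # (models--org--name/...) exactly as they live under the hub cache.
        tar.extractall(hub_cache)
```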
Chars/symlink-models
[ "license:mit", "region:us" ]
2023-11-27T09:38:33+00:00
{"license": "mit"}
2023-11-27T10:09:52+00:00
[]
[]
TAGS #license-mit #region-us
# Notes When third-party code calls models, it inevitably has to download them from huggingface. If the network connection is poor the download can fail, and downloading manually means editing each piece of third-party code so that it reads a local model path instead. So the corresponding cache directories are stored here; simply extract them into '~/.cache/huggingface/hub/'.
[ "# Notes\nWhen third-party code calls models, it inevitably has to download them from huggingface\n\nIf the network connection is poor the download can fail, and downloading manually means editing each piece of third-party code so that it reads a local model path instead\n\nSo the corresponding cache directories are stored here; simply extract them into '~/.cache/huggingface/hub/'" ]
[ "TAGS\n#license-mit #region-us \n", "# Notes\nWhen third-party code calls models, it inevitably has to download them from huggingface\n\nIf the network connection is poor the download can fail, and downloading manually means editing each piece of third-party code so that it reads a local model path instead\n\nSo the corresponding cache directories are stored here; simply extract them into '~/.cache/huggingface/hub/'" ]
[ 11, 84 ]
[ "passage: TAGS\n#license-mit #region-us \n# Notes\nWhen third-party code calls models, it inevitably has to download them from huggingface\n\nIf the network connection is poor the download can fail, and downloading manually means editing each piece of third-party code so that it reads a local model path instead\n\nSo the corresponding cache directories are stored here; simply extract them into '~/.cache/huggingface/hub/'" ]
3508bdf443f42f350decece2558612de908bd672
# Dataset Card for "patient_info" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
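As a minimal loading sketch (the split and column names below are taken from the dataset configuration above; everything else is illustrative):

```python
from datasets import load_dataset

# Load one condition-specific split; the available splits are anxiety, depression,
# ptsd, bipolar, substance_misuse, eating_disorders and alcohol_consumption.
ds = load_dataset("Elfsong/patient_info", split="anxiety")

thread = ds[0]
print(thread["title"])           # thread title
print(thread["content"][:200])   # opening post text
print(len(thread["comments"]))   # number of top-level comments on the thread
```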
Elfsong/patient_info
[ "region:us" ]
2023-11-27T09:56:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "anxiety", "path": "data/anxiety-*"}, {"split": "depression", "path": "data/depression-*"}, {"split": "ptsd", "path": "data/ptsd-*"}, {"split": "bipolar", "path": "data/bipolar-*"}, {"split": "substance_misuse", "path": "data/substance_misuse-*"}, {"split": "eating_disorders", "path": "data/eating_disorders-*"}, {"split": "alcohol_consumption", "path": "data/alcohol_consumption-*"}]}], "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "comments", "list": [{"name": "author_from", "sequence": "string"}, {"name": "author_to", "sequence": "string"}, {"name": "comments", "list": [{"name": "author_from", "sequence": "string"}, {"name": "author_to", "sequence": "string"}, {"name": "content", "sequence": "string"}, {"name": "date", "sequence": "string"}]}, {"name": "content", "sequence": "string"}, {"name": "date", "sequence": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "title", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "author", "dtype": "string"}], "splits": [{"name": "anxiety", "num_bytes": 143006120, "num_examples": 27393}, {"name": "depression", "num_bytes": 49953142, "num_examples": 6982}, {"name": "ptsd", "num_bytes": 1626957, "num_examples": 349}, {"name": "bipolar", "num_bytes": 3087512, "num_examples": 474}, {"name": "substance_misuse", "num_bytes": 1406369, "num_examples": 195}, {"name": "eating_disorders", "num_bytes": 1294592, "num_examples": 233}, {"name": "alcohol_consumption", "num_bytes": 21540333, "num_examples": 1855}], "download_size": 109169290, "dataset_size": 221915025}}
2023-11-27T09:57:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "patient_info" More Information needed
[ "# Dataset Card for \"patient_info\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"patient_info\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"patient_info\"\n\nMore Information needed" ]
4ac0b471c29c09960ac3163096162a7e21b0d5f0
# Dataset Card for "seven_cups" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
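A rough usage sketch, with split and column names taken from the dataset configuration (the rest is illustrative):

```python
from datasets import load_dataset

# Load the "depression" split; the other splits are anxiety, bipolar,
# personalitydisorders, trauma, eds, substanceaddiction and relationships.
ds = load_dataset("Elfsong/seven_cups", split="depression")

thread = ds[0]
print(thread["lead_post"]["title"])   # the thread's opening post
print(len(thread["comment_posts"]))   # replies; each carries parent_ids for nesting
```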
Elfsong/seven_cups
[ "region:us" ]
2023-11-27T09:58:50+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "anxiety", "path": "data/anxiety-*"}, {"split": "bipolar", "path": "data/bipolar-*"}, {"split": "depression", "path": "data/depression-*"}, {"split": "personalitydisorders", "path": "data/personalitydisorders-*"}, {"split": "trauma", "path": "data/trauma-*"}, {"split": "eds", "path": "data/eds-*"}, {"split": "substanceaddiction", "path": "data/substanceaddiction-*"}, {"split": "relationships", "path": "data/relationships-*"}]}], "dataset_info": {"features": [{"name": "lead_post", "struct": [{"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "thread_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "topic", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "comment_posts", "list": [{"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "parent_ids", "sequence": "string"}, {"name": "post_id", "dtype": "string"}, {"name": "thread_id", "dtype": "string"}, {"name": "url", "dtype": "string"}]}], "splits": [{"name": "anxiety", "num_bytes": 24332055, "num_examples": 7948}, {"name": "bipolar", "num_bytes": 3496018, "num_examples": 1033}, {"name": "depression", "num_bytes": 59927557, "num_examples": 10243}, {"name": "personalitydisorders", "num_bytes": 9791687, "num_examples": 1854}, {"name": "trauma", "num_bytes": 53211657, "num_examples": 5763}, {"name": "eds", "num_bytes": 9837092, "num_examples": 2382}, {"name": "substanceaddiction", "num_bytes": 1957813, "num_examples": 687}, {"name": "relationships", "num_bytes": 56187112, "num_examples": 12652}], "download_size": 94273903, "dataset_size": 218740991}}
2023-11-27T09:59:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "seven_cups" More Information needed
[ "# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]