| column | type | min | max |
|---|---|---|---|
| sha | stringlengths | 40 | 40 |
| text | stringlengths | 1 | 13.4M |
| id | stringlengths | 2 | 117 |
| tags | listlengths | 1 | 7.91k |
| created_at | stringlengths | 25 | 25 |
| metadata | stringlengths | 2 | 875k |
| last_modified | stringlengths | 25 | 25 |
| arxiv | listlengths | 0 | 25 |
| languages | listlengths | 0 | 7.91k |
| tags_str | stringlengths | 17 | 159k |
| text_str | stringlengths | 1 | 447k |
| text_lists | listlengths | 0 | 352 |
| processed_texts | listlengths | 1 | 353 |
| tokens_length | listlengths | 1 | 353 |
| input_texts | listlengths | 1 | 40 |
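The table above summarizes one row per dataset card: the raw card markdown in `text`, the repository `id`, its `tags`, timestamps, the card `metadata`, and several derived columns (`text_str`, `text_lists`, `processed_texts`, `tokens_length`, `input_texts`). Below is a minimal sketch of how such a table could be inspected with the Hugging Face `datasets` library; the repository id is a placeholder, since the source does not name the dataset these rows belong to.

```python
from datasets import load_dataset

# Placeholder repo id -- the source does not identify the dataset itself,
# so substitute the actual repository name before running.
ds = load_dataset("user/dataset-card-dump", split="train")

row = ds[0]                  # one row = one dataset card plus derived fields
print(row["id"])             # repository id, e.g. "phatjk/wikipedia_vi"
print(row["sha"])            # 40-character commit hash
print(row["tags"])           # tag list such as ["region:us"]
print(row["text"][:200])     # start of the raw card markdown
# processed_texts and tokens_length appear to be parallel lists
# (both have the same length range in the schema above).
print(len(row["processed_texts"]), len(row["tokens_length"]))
```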
3eb883809c6c3b3234d51cb7e54e8a49f458b058
# Dataset Card for "wikipedia_vi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
phatjk/wikipedia_vi
[ "region:us" ]
2023-10-14T04:55:38+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "bm25_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2889457164, "num_examples": 1944406}], "download_size": 1242752879, "dataset_size": 2889457164}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T04:56:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia_vi" More Information needed
[ "# Dataset Card for \"wikipedia_vi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia_vi\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia_vi\"\n\nMore Information needed" ]
8ed7857758a29f9a91315a888ad34bd5fc72d5a7
# Dataset Card for "wikipedia_vi_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
phatjk/wikipedia_vi_qa
[ "region:us" ]
2023-10-14T05:32:05+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8523200, "num_examples": 20107}], "download_size": 4759406, "dataset_size": 8523200}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T05:32:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia_vi_qa" More Information needed
[ "# Dataset Card for \"wikipedia_vi_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia_vi_qa\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia_vi_qa\"\n\nMore Information needed" ]
b755e331bf8ab65125d4991a16df8c0efc81217d
# Dataset Card for "rbrt_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_test
[ "region:us" ]
2023-10-14T05:48:17+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1270137685, "num_examples": 900000}], "download_size": 282453475, "dataset_size": 1270137685}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T05:49:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_test" More Information needed
[ "# Dataset Card for \"rbrt_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_test\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_test\"\n\nMore Information needed" ]
89180104a1fcce530b11081dc7f6b3a09045df06
# Dataset Card for "rbrt_eval_lrg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_eval_lrg
[ "region:us" ]
2023-10-14T06:26:34+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 18878517, "num_examples": 11590}], "download_size": 5852585, "dataset_size": 18878517}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T06:26:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_eval_lrg" More Information needed
[ "# Dataset Card for \"rbrt_eval_lrg\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_eval_lrg\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_eval_lrg\"\n\nMore Information needed" ]
8c7354be9027f0a099edc02c634905f0280dbf88
# Dataset Card for "rbrt_test_lrg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_test_lrg
[ "region:us" ]
2023-10-14T06:28:04+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1270137685, "num_examples": 900000}], "download_size": 282453475, "dataset_size": 1270137685}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T06:28:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_test_lrg" More Information needed
[ "# Dataset Card for \"rbrt_test_lrg\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_test_lrg\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_test_lrg\"\n\nMore Information needed" ]
aea4e5b27b8a9c77ff5151682f4dc9e9004bab73
# Dataset Card for "rbrt_lrg_trn" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_lrg_trn
[ "region:us" ]
2023-10-14T06:28:19+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 418128930, "num_examples": 339120}], "download_size": 121309096, "dataset_size": 418128930}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T06:28:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_lrg_trn" More Information needed
[ "# Dataset Card for \"rbrt_lrg_trn\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_lrg_trn\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_lrg_trn\"\n\nMore Information needed" ]
2ef184410c8484b5cfcde15936a67d9b017f9fcf
# Dataset Card for "lm_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
likhith45688/lm_dataset
[ "region:us" ]
2023-10-14T06:31:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 606341604, "num_examples": 361779}, {"name": "valid", "num_bytes": 144454440, "num_examples": 86190}], "download_size": 137305987, "dataset_size": 750796044}}
2023-10-14T06:32:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lm_dataset" More Information needed
[ "# Dataset Card for \"lm_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lm_dataset\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lm_dataset\"\n\nMore Information needed" ]
1ddb0b67aa49fd6eddf6ce10608f07ba35e7d78c
# Dataset Card for "violet-evergarden-ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shellypeng/violet-evergarden-ds
[ "region:us" ]
2023-10-14T06:38:53+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 446708153.158, "num_examples": 3823}], "download_size": 478066266, "dataset_size": 446708153.158}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T11:02:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "violet-evergarden-ds" More Information needed
[ "# Dataset Card for \"violet-evergarden-ds\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"violet-evergarden-ds\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"violet-evergarden-ds\"\n\nMore Information needed" ]
3bdb78f5ef206c77a7677c1dd4d3d2de6644ee77
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
sagorhishab/demo_data
[ "task_categories:text-generation", "language:bn", "license:mit", "region:us" ]
2023-10-14T07:02:21+00:00
{"language": ["bn"], "license": "mit", "task_categories": ["text-generation"]}
2023-10-14T07:06:10+00:00
[]
[ "bn" ]
TAGS #task_categories-text-generation #language-Bengali #license-mit #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#task_categories-text-generation #language-Bengali #license-mit #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 27, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#task_categories-text-generation #language-Bengali #license-mit #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
c9068bf84c1b2e2cbd96f13d9db9160e336b5c77
# Dataset Card for "hc3-wiki-intro-tokenized-max-len-512" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rajendrabaskota/hc3-wiki-intro-tokenized-max-len-512
[ "region:us" ]
2023-10-14T10:07:46+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 775237004, "num_examples": 330347}, {"name": "test", "num_bytes": 40840334, "num_examples": 17387}], "download_size": 429915523, "dataset_size": 816077338}}
2023-10-14T10:08:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hc3-wiki-intro-tokenized-max-len-512" More Information needed
[ "# Dataset Card for \"hc3-wiki-intro-tokenized-max-len-512\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hc3-wiki-intro-tokenized-max-len-512\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hc3-wiki-intro-tokenized-max-len-512\"\n\nMore Information needed" ]
06ec90d7e7a75eff55aefb3b05ce344b3d0b01dd
# Dataset Card for "synth_code_preference_20k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pvduy/synth_code_preference_20k
[ "region:us" ]
2023-10-14T10:42:25+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "selected", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 75033356, "num_examples": 20910}], "download_size": 16397343, "dataset_size": 75033356}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T10:42:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "synth_code_preference_20k" More Information needed
[ "# Dataset Card for \"synth_code_preference_20k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"synth_code_preference_20k\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"synth_code_preference_20k\"\n\nMore Information needed" ]
c3a3e644443c227141089070499c7af1e80dd2b5
# Dataset Card for "rbrt_eval_sur" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_eval_sur
[ "region:us" ]
2023-10-14T10:42:36+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 13846819, "num_examples": 6970}], "download_size": 3906257, "dataset_size": 13846819}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T10:42:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_eval_sur" More Information needed
[ "# Dataset Card for \"rbrt_eval_sur\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_eval_sur\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_eval_sur\"\n\nMore Information needed" ]
1210ffee6c1dd1a48dd7602febb5e8eb4b12ae22
# Dataset Card for "linge-ping-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anujpaudel/linge-ping-1
[ "region:us" ]
2023-10-14T11:00:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6525269.0, "num_examples": 159}], "download_size": 6003377, "dataset_size": 6525269.0}}
2023-10-14T11:08:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "linge-ping-1" More Information needed
[ "# Dataset Card for \"linge-ping-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"linge-ping-1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"linge-ping-1\"\n\nMore Information needed" ]
4764d24c08fa308a07289835aa6d6ead5fa176ed
# Dataset Card for "rbrt_uda_trn" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_uda_trn
[ "region:us" ]
2023-10-14T11:02:11+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1115662838, "num_examples": 755110}], "download_size": 352431197, "dataset_size": 1115662838}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T01:10:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_uda_trn" More Information needed
[ "# Dataset Card for \"rbrt_uda_trn\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_uda_trn\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_uda_trn\"\n\nMore Information needed" ]
718062d6a2abc6c976b9e60966cbced83803f8b7
# Dataset Card for "wikipedia-augmented-chunked" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
legacy107/wikipedia-augmented-chunked
[ "region:us" ]
2023-10-14T11:06:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "History", "sequence": "string"}, {"name": "QuAC_dialog_id", "dtype": "string"}, {"name": "Question", "dtype": "string"}, {"name": "Question_no", "dtype": "int64"}, {"name": "Rewrite", "dtype": "string"}, {"name": "true_page_title", "dtype": "string"}, {"name": "true_contexts", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "true_contexts_wiki", "dtype": "string"}, {"name": "extractive", "dtype": "bool"}, {"name": "retrieved_contexts", "sequence": "string"}, {"name": "chunked_article", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1778351216, "num_examples": 17183}, {"name": "test", "num_bytes": 315554804, "num_examples": 2882}], "download_size": 612111820, "dataset_size": 2093906020}}
2023-10-14T11:06:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia-augmented-chunked" More Information needed
[ "# Dataset Card for \"wikipedia-augmented-chunked\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia-augmented-chunked\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia-augmented-chunked\"\n\nMore Information needed" ]
f85ed0aee92f91338ad043d45782aaf4a16bfcc1
# Dataset Card for "funsd_plus" ## Table of Contents - [Dataset Description](#dataset-description) - [Homepage](#homepage) - [Point of Contact](#point-of-contact) - [Languages](#languages) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [FUNSD+ | A larger and revised FUNSD dataset by Konfuzio](https://konfuzio.com/en/funsd-plus/) - **Point of Contact:** [[email protected]](mailto:[email protected]) - **Languages:** `English` ## Additional Information ### Licensing Information [FUNSD+ license](https://huggingface.co/datasets/konfuzio/funsd_plus/blob/main/LICENSE) ### Citation Information ``` @misc{zagami_helm_2022, title = {FUNSD+: A larger and revised FUNSD dataset}, author = {Zagami, Davide and Helm, Christopher}, year = 2022, month = {Oct}, journal = {FUNSD+ | A larger and revised FUNSD dataset}, publisher = {Helm & Nagel GmbH}, url = {http://konfuzio.com/funsd-plus/} } ```
konfuzio/funsd_plus
[ "size_categories:1K<n<10K", "language:en", "license:other", "funsd", "region:us" ]
2023-10-14T11:31:04+00:00
{"language": ["en"], "license": "other", "size_categories": ["1K<n<10K"], "pretty_name": "FUNSD+", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "words", "sequence": "string"}, {"name": "bboxes", "sequence": {"sequence": "float64"}}, {"name": "labels", "sequence": "int64"}, {"name": "grouped_words", "sequence": {"sequence": "int64"}}, {"name": "linked_groups", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train", "num_bytes": 183288640.158, "num_examples": 1026}, {"name": "test", "num_bytes": 20706650, "num_examples": 113}], "download_size": 195177090, "dataset_size": 203995290.158}, "extra_gated_prompt": "You agree to not attempt to determine the identity of individuals in this dataset. You agree to the terms and conditions of the [FUNSD+ license](https://huggingface.co/datasets/konfuzio/funsd_plus/blob/main/LICENSE).", "extra_gated_fields": {"Name": "text", "Company": "text", "Country": "text", "Email": "text", "I agree to the terms and conditions of the FUNSD+ license": "checkbox"}, "tags": ["funsd"]}
2023-10-16T08:33:20+00:00
[]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #license-other #funsd #region-us
# Dataset Card for "funsd_plus" ## Table of Contents - Dataset Description - Homepage - Point of Contact - Languages - Additional Information - Licensing Information - Citation Information ## Dataset Description - Homepage: FUNSD+ | A larger and revised FUNSD dataset by Konfuzio - Point of Contact: URL@URL - Languages: 'English' ## Additional Information ### Licensing Information FUNSD+ license
[ "# Dataset Card for \"funsd_plus\"", "## Table of Contents\n- Dataset Description\n - Homepage\n - Point of Contact\n - Languages\n- Additional Information\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: FUNSD+ | A larger and revised FUNSD dataset by Konfuzio\n- Point of Contact: URL@URL\n- Languages: 'English'", "## Additional Information", "### Licensing Information\n\nFUNSD+ license" ]
[ "TAGS\n#size_categories-1K<n<10K #language-English #license-other #funsd #region-us \n", "# Dataset Card for \"funsd_plus\"", "## Table of Contents\n- Dataset Description\n - Homepage\n - Point of Contact\n - Languages\n- Additional Information\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: FUNSD+ | A larger and revised FUNSD dataset by Konfuzio\n- Point of Contact: URL@URL\n- Languages: 'English'", "## Additional Information", "### Licensing Information\n\nFUNSD+ license" ]
[ 31, 12, 32, 40, 5, 10 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #license-other #funsd #region-us \n# Dataset Card for \"funsd_plus\"## Table of Contents\n- Dataset Description\n - Homepage\n - Point of Contact\n - Languages\n- Additional Information\n - Licensing Information\n - Citation Information## Dataset Description\n\n- Homepage: FUNSD+ | A larger and revised FUNSD dataset by Konfuzio\n- Point of Contact: URL@URL\n- Languages: 'English'## Additional Information### Licensing Information\n\nFUNSD+ license" ]
5165aa479b7f49a61a727478ea97b76414c5635d
# Dataset Card for "Small-Instruction-tuning-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PiyushLavaniya/Small-Instruction-tuning-dataset
[ "region:us" ]
2023-10-14T11:35:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 147200.32363636364, "num_examples": 247}, {"name": "test", "num_bytes": 16686.676363636365, "num_examples": 28}], "download_size": 26802, "dataset_size": 163887.0}}
2023-10-14T11:36:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Small-Instruction-tuning-dataset" More Information needed
[ "# Dataset Card for \"Small-Instruction-tuning-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Small-Instruction-tuning-dataset\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Small-Instruction-tuning-dataset\"\n\nMore Information needed" ]
73872f627da275ae750be595554a42ddb5fd2739
# Dataset Card for "rbrt_uda_large_ep13" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_uda_large_ep13
[ "region:us" ]
2023-10-14T11:45:08+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1115662838, "num_examples": 755110}], "download_size": 352431197, "dataset_size": 1115662838}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T01:12:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_uda_large_ep13" More Information needed
[ "# Dataset Card for \"rbrt_uda_large_ep13\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_uda_large_ep13\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_uda_large_ep13\"\n\nMore Information needed" ]
bd522aab2d1c0dd81ec3ddf29342416c8964341f
> The dataset was downloaded from [here](https://ceit.aut.ac.ir/~keyvanrad/download/ML971/project/), which was provided by Amirkabir University of Technology. > The data were then labeled by the authors. > Experimental results show that the fine-tuned model works well on Persian license plates.
hezarai/persian-license-plate-v1
[ "task_categories:image-to-text", "language:fa", "region:us" ]
2023-10-14T11:56:01+00:00
{"language": ["fa"], "task_categories": ["image-to-text"], "pretty_name": "PersianLicensePlate"}
2023-12-15T13:08:45+00:00
[]
[ "fa" ]
TAGS #task_categories-image-to-text #language-Persian #region-us
> The dataset was downloaded from here, which was provided by Amirkabir University of Technology. > The data were then labeled by the authors. > Experimental results show that the fine-tuned model works well on Persian license plates.
[]
[ "TAGS\n#task_categories-image-to-text #language-Persian #region-us \n" ]
[ 23 ]
[ "passage: TAGS\n#task_categories-image-to-text #language-Persian #region-us \n" ]
4d6ca514082a5a8e99801730b1a759b94dfdd821
# Dataset Card for "chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DialogueCharacter/chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2
[ "region:us" ]
2023-10-14T12:21:11+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "reward_score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1555344961, "num_examples": 1122934}], "download_size": 944071681, "dataset_size": 1555344961}}
2023-10-14T12:29:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2" More Information needed
[ "# Dataset Card for \"chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
[ 6, 38 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"chinese_general_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
47fc3ffba1f533f22781cab7ddd098d3b8c0ba65
# Dataset Card for "chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DialogueCharacter/chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2
[ "region:us" ]
2023-10-14T12:28:54+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "reward_score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 144603592, "num_examples": 110670}], "download_size": 83071987, "dataset_size": 144603592}}
2023-10-14T12:28:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2" More Information needed
[ "# Dataset Card for \"chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
[ 6, 40 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"chinese_dialogue_instruction_with_reward_score_judged_by_13B_baichuan2\"\n\nMore Information needed" ]
870eb166a8b1c3e7b506262959fd33a2c26a60f0
# Dataset Card for "Ashaar_diac" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
khalidalt/Ashaar_diac_1
[ "region:us" ]
2023-10-14T12:48:44+00:00
{"dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12159497, "num_examples": 23481}], "download_size": 6059483, "dataset_size": 12159497}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T12:55:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Ashaar_diac" More Information needed
[ "# Dataset Card for \"Ashaar_diac\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Ashaar_diac\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Ashaar_diac\"\n\nMore Information needed" ]
760e0524c226bb064dc7daa09bc19f9fdbd927b4
# Dataset Card for "qa_wikipedia_augmented_sentence_transformer_negative_farming" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
legacy107/qa_wikipedia_augmented_sentence_transformer_negative_farming
[ "region:us" ]
2023-10-14T12:49:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "Question", "dtype": "string"}, {"name": "Question_no", "dtype": "int64"}, {"name": "Rewrite", "dtype": "string"}, {"name": "true_page_title", "dtype": "string"}, {"name": "negatives", "sequence": "string"}, {"name": "positive", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22137737, "num_examples": 6000}, {"name": "validation", "num_bytes": 4290429, "num_examples": 1183}], "download_size": 9002671, "dataset_size": 26428166}}
2023-10-14T12:49:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "qa_wikipedia_augmented_sentence_transformer_negative_farming" More Information needed
[ "# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming\"\n\nMore Information needed" ]
c31b6deb1f962c5279f603910ac078b7e9808d75
# Dataset Card for "character_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/character_prompts
[ "region:us" ]
2023-10-14T13:01:48+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5947224, "num_examples": 10000}], "download_size": 686296, "dataset_size": 5947224}}
2023-10-14T13:01:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "character_prompts" More Information needed
[ "# Dataset Card for \"character_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"character_prompts\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"character_prompts\"\n\nMore Information needed" ]
44658e8ce94c123410811ab623c816b07eb564ef
# Dataset Card for "fourthbrain_synthetic_marketmail_gpt4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
renatomoulin/fourthbrain_synthetic_marketmail_gpt4
[ "region:us" ]
2023-10-14T13:06:27+00:00
{"dataset_info": {"features": [{"name": "product", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "marketing_email", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13145, "num_examples": 10}], "download_size": 18470, "dataset_size": 13145}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T13:12:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fourthbrain_synthetic_marketmail_gpt4" More Information needed
[ "# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt4\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt4\"\n\nMore Information needed" ]
5b01887f5e8226dd031f31c733580ad5d63bd3a0
This is a test database. Please ignore.
TwoAbove/test-dalle-3
[ "language:en", "license:cc0-1.0", "image-text-dataset", "synthetic-dataset", "region:us" ]
2023-10-14T13:10:36+00:00
{"language": ["en"], "license": ["cc0-1.0"], "tags": ["image-text-dataset", "synthetic-dataset"], "dataset_info": {"features": [{"name": "caption", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "link", "dtype": "string"}, {"name": "message_id", "dtype": "string"}, {"name": "timestamp", "dtype": "string"}]}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-16T14:46:17+00:00
[]
[ "en" ]
TAGS #language-English #license-cc0-1.0 #image-text-dataset #synthetic-dataset #region-us
This is a test database. Please ignore.
[]
[ "TAGS\n#language-English #license-cc0-1.0 #image-text-dataset #synthetic-dataset #region-us \n" ]
[ 32 ]
[ "passage: TAGS\n#language-English #license-cc0-1.0 #image-text-dataset #synthetic-dataset #region-us \n" ]
750ba052a9ae5e0ecbe9d3fe17c896506a95fd5c
# Dataset Card for "thai_usembassy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) This dataset collect all Thai & English news from [U.S. Embassy Bangkok](https://th.usembassy.gov/news-events/).
pythainlp/thai_usembassy
[ "task_categories:text-generation", "task_categories:translation", "language:th", "language:en", "license:cc0-1.0", "region:us" ]
2023-10-14T13:14:38+00:00
{"language": ["th", "en"], "license": "cc0-1.0", "task_categories": ["text-generation", "translation"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "th", "dtype": "string"}, {"name": "en", "dtype": "string"}, {"name": "title_en", "dtype": "string"}, {"name": "title_th", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5060813, "num_examples": 615}], "download_size": 2048306, "dataset_size": 5060813}}
2023-10-20T13:34:38+00:00
[]
[ "th", "en" ]
TAGS #task_categories-text-generation #task_categories-translation #language-Thai #language-English #license-cc0-1.0 #region-us
# Dataset Card for "thai_usembassy" More Information needed This dataset collect all Thai & English news from U.S. Embassy Bangkok.
[ "# Dataset Card for \"thai_usembassy\"\n\nMore Information needed\n\nThis dataset collect all Thai & English news from U.S. Embassy Bangkok." ]
[ "TAGS\n#task_categories-text-generation #task_categories-translation #language-Thai #language-English #license-cc0-1.0 #region-us \n", "# Dataset Card for \"thai_usembassy\"\n\nMore Information needed\n\nThis dataset collect all Thai & English news from U.S. Embassy Bangkok." ]
[ 43, 34 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-translation #language-Thai #language-English #license-cc0-1.0 #region-us \n# Dataset Card for \"thai_usembassy\"\n\nMore Information needed\n\nThis dataset collect all Thai & English news from U.S. Embassy Bangkok." ]
ff8483b73742e63ccbbb4b584d15cdbc3c027c87
# Dataset Card for "fourthbrain_synthetic_marketmail_gpt35_turbo" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
renatomoulin/fourthbrain_synthetic_marketmail_gpt35_turbo
[ "region:us" ]
2023-10-14T13:16:31+00:00
{"dataset_info": {"features": [{"name": "product", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "marketing_email", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21655, "num_examples": 10}], "download_size": 27332, "dataset_size": 21655}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T13:16:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fourthbrain_synthetic_marketmail_gpt35_turbo" More Information needed
[ "# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt35_turbo\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt35_turbo\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fourthbrain_synthetic_marketmail_gpt35_turbo\"\n\nMore Information needed" ]
25dd9bc5278725231b6cdd204200794c5ecf66ba
# Dataset Card for "cover-letter-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShashiVish/cover-letter-dataset
[ "region:us" ]
2023-10-14T13:37:08+00:00
{"dataset_info": {"features": [{"name": "Job Title", "dtype": "string"}, {"name": "Preferred Qualifications", "dtype": "string"}, {"name": "Hiring Company", "dtype": "string"}, {"name": "Applicant Name", "dtype": "string"}, {"name": "Past Working Experience", "dtype": "string"}, {"name": "Current Working Experience", "dtype": "string"}, {"name": "Skillsets", "dtype": "string"}, {"name": "Qualifications", "dtype": "string"}, {"name": "Cover Letter", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1048886.142857143, "num_examples": 813}, {"name": "test", "num_bytes": 450259.85714285716, "num_examples": 349}], "download_size": 507518, "dataset_size": 1499146.0}}
2023-10-15T14:20:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cover-letter-dataset" More Information needed
[ "# Dataset Card for \"cover-letter-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cover-letter-dataset\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cover-letter-dataset\"\n\nMore Information needed" ]
df4fb3c3b055704df47951c106cf9ebd15bdf24f
# Dataset Card for "WMH_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SAint7579/WMH_dataset
[ "region:us" ]
2023-10-14T13:44:38+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21238990.0, "num_examples": 430}], "download_size": 21181913, "dataset_size": 21238990.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-21T20:46:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "WMH_dataset" More Information needed
[ "# Dataset Card for \"WMH_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"WMH_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"WMH_dataset\"\n\nMore Information needed" ]
52f6a80c67a6104fa871d197d5462aaae51d0c40
# Dataset Card for "artwork_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/artwork_prompts
[ "region:us" ]
2023-10-14T13:56:10+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5594305, "num_examples": 10000}], "download_size": 639738, "dataset_size": 5594305}}
2023-10-14T13:56:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "artwork_prompts" More Information needed
[ "# Dataset Card for \"artwork_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"artwork_prompts\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"artwork_prompts\"\n\nMore Information needed" ]
ea66fad56190e67d8c7e422676218aaa8b10b9e2
# Dataset Card for "test_privacy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MartinKu/test_privacy
[ "region:us" ]
2023-10-14T14:59:28+00:00
{"dataset_info": {"features": [{"name": "NAME", "dtype": "float64"}, {"name": "CATEGORY", "dtype": "float64"}, {"name": "ADDRESS", "dtype": "float64"}, {"name": "AGE", "dtype": "float64"}, {"name": "CREDIT_DEBIT_CVV", "dtype": "float64"}, {"name": "CREDIT_DEBIT_EXPIRY", "dtype": "float64"}, {"name": "CREDIT_DEBIT_NUMBER", "dtype": "float64"}, {"name": "DRIVER_ID", "dtype": "float64"}, {"name": "PHONE", "dtype": "float64"}, {"name": "PASSWORD", "dtype": "float64"}, {"name": "BANK_ACCOUNT_NUMBER", "dtype": "float64"}, {"name": "PASSPORT_NUMBER", "dtype": "float64"}, {"name": "SSN", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 0, "num_examples": 0}], "download_size": 3175, "dataset_size": 0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T15:37:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_privacy" More Information needed
[ "# Dataset Card for \"test_privacy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_privacy\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_privacy\"\n\nMore Information needed" ]
d495b28b04048b27f892733eed9f45a518122df5
# Dataset Card for Evaluation run of golaxy/gogpt-560m ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/golaxy/gogpt-560m - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [golaxy/gogpt-560m](https://huggingface.co/golaxy/gogpt-560m) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_golaxy__gogpt-560m", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-14T16:13:28.692590](https://huggingface.co/datasets/open-llm-leaderboard/details_golaxy__gogpt-560m/blob/main/results_2023-10-14T16-13-28.692590.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.0382760067114094, "em_stderr": 0.001964844510611307, "f1": 0.06699035234899327, "f1_stderr": 0.0021908023180713283, "acc": 0.2537490134175217, "acc_stderr": 0.00702545276061429 }, "harness|drop|3": { "em": 0.0382760067114094, "em_stderr": 0.001964844510611307, "f1": 0.06699035234899327, "f1_stderr": 0.0021908023180713283 }, "harness|gsm8k|5": { "acc": 0.0, "acc_stderr": 0.0 }, "harness|winogrande|5": { "acc": 0.5074980268350434, "acc_stderr": 0.01405090552122858 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_golaxy__gogpt-560m
[ "region:us" ]
2023-10-14T15:13:32+00:00
{"pretty_name": "Evaluation run of golaxy/gogpt-560m", "dataset_summary": "Dataset automatically created during the evaluation run of model [golaxy/gogpt-560m](https://huggingface.co/golaxy/gogpt-560m) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_golaxy__gogpt-560m\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-14T16:13:28.692590](https://huggingface.co/datasets/open-llm-leaderboard/details_golaxy__gogpt-560m/blob/main/results_2023-10-14T16-13-28.692590.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.0382760067114094,\n \"em_stderr\": 0.001964844510611307,\n \"f1\": 0.06699035234899327,\n \"f1_stderr\": 0.0021908023180713283,\n \"acc\": 0.2537490134175217,\n \"acc_stderr\": 0.00702545276061429\n },\n \"harness|drop|3\": {\n \"em\": 0.0382760067114094,\n \"em_stderr\": 0.001964844510611307,\n \"f1\": 0.06699035234899327,\n \"f1_stderr\": 0.0021908023180713283\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.5074980268350434,\n \"acc_stderr\": 0.01405090552122858\n }\n}\n```", "repo_url": "https://huggingface.co/golaxy/gogpt-560m", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_14T16_13_28.692590", "path": ["**/details_harness|drop|3_2023-10-14T16-13-28.692590.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-14T16-13-28.692590.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_14T16_13_28.692590", "path": ["**/details_harness|gsm8k|5_2023-10-14T16-13-28.692590.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-14T16-13-28.692590.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_14T16_13_28.692590", "path": ["**/details_harness|winogrande|5_2023-10-14T16-13-28.692590.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-14T16-13-28.692590.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_14T16_13_28.692590", "path": ["results_2023-10-14T16-13-28.692590.parquet"]}, {"split": "latest", "path": ["results_2023-10-14T16-13-28.692590.parquet"]}]}]}
2023-10-14T15:13:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of golaxy/gogpt-560m ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model golaxy/gogpt-560m on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-14T16:13:28.692590(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of golaxy/gogpt-560m", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model golaxy/gogpt-560m on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T16:13:28.692590(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of golaxy/gogpt-560m", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model golaxy/gogpt-560m on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T16:13:28.692590(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 18, 31, 166, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of golaxy/gogpt-560m## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model golaxy/gogpt-560m on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-14T16:13:28.692590(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
132f1f5d40ef23dd7a74bbc5933e7961d7461bc6
# Dataset Card for "wiki_experts_data_transform_icl5dst" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ostapeno/qa-platy_icl5_clen128_maxD-1_maxC5000_0
[ "region:us" ]
2023-10-14T15:53:38+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "docno", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "icl_examples", "sequence": "string"}, {"name": "author_instr", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "author_response", "dtype": "string"}, {"name": "normalized_cumul_logprob_response", "dtype": "float64"}], "splits": [{"name": "formal_logic", "num_bytes": 8194431.408696578, "num_examples": 2891}, {"name": "machine_learning", "num_bytes": 10320278.367023258, "num_examples": 3641}, {"name": "global_facts", "num_bytes": 11244313.178242588, "num_examples": 3967}, {"name": "abstract_algebra", "num_bytes": 12179685.839906327, "num_examples": 4297}, {"name": "high_school_physics", "num_bytes": 11162113.762520624, "num_examples": 3938}, {"name": "college_biology", "num_bytes": 10462001.49757837, "num_examples": 3691}, {"name": "high_school_government_and_politics", "num_bytes": 10816309.323966151, "num_examples": 3816}, {"name": "prehistory", "num_bytes": 11082748.809409762, "num_examples": 3910}, {"name": "security_studies", "num_bytes": 9869598.811858002, "num_examples": 3482}, {"name": "sociology", "num_bytes": 11181955.000798339, "num_examples": 3945}], "download_size": 21363610, "dataset_size": 106513436.0}}
2023-10-14T15:53:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wiki_experts_data_transform_icl5dst" More Information needed
[ "# Dataset Card for \"wiki_experts_data_transform_icl5dst\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wiki_experts_data_transform_icl5dst\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wiki_experts_data_transform_icl5dst\"\n\nMore Information needed" ]
0203651875cdd462a2f81be3711604e556e23e3a
## max_context_length: 128 ## max_documents_per_subject: 1000
sordonia/id-maxD1000
[ "region:us" ]
2023-10-14T16:00:10+00:00
{}
2023-10-14T16:00:25+00:00
[]
[]
TAGS #region-us
## max_context_length: 128 ## max_documents_per_subject: 1000
[ "## max_context_length: 128", "## max_documents_per_subject: 1000" ]
[ "TAGS\n#region-us \n", "## max_context_length: 128", "## max_documents_per_subject: 1000" ]
[ 6, 10, 12 ]
[ "passage: TAGS\n#region-us \n## max_context_length: 128## max_documents_per_subject: 1000" ]
60bf07a567d72a07de1d1eb820cb3519459338b9
## max_context_length: 128 ## max_documents_per_subject: 500
sordonia/id-maxD500
[ "region:us" ]
2023-10-14T16:01:37+00:00
{}
2023-10-14T16:01:50+00:00
[]
[]
TAGS #region-us
## max_context_length: 128 ## max_documents_per_subject: 500
[ "## max_context_length: 128", "## max_documents_per_subject: 500" ]
[ "TAGS\n#region-us \n", "## max_context_length: 128", "## max_documents_per_subject: 500" ]
[ 6, 10, 12 ]
[ "passage: TAGS\n#region-us \n## max_context_length: 128## max_documents_per_subject: 500" ]
ebd1c658961c19443454c05dcc21879d8ad10bb3
# Dataset Card for "FrEn_handpicks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ismailiismail/FrEn_handpicks
[ "region:us" ]
2023-10-14T16:06:15+00:00
{"dataset_info": {"features": [{"name": "French", "dtype": "string"}, {"name": "English", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34126, "num_examples": 394}], "download_size": 16438, "dataset_size": 34126}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T18:55:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "FrEn_handpicks" More Information needed
[ "# Dataset Card for \"FrEn_handpicks\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"FrEn_handpicks\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"FrEn_handpicks\"\n\nMore Information needed" ]
d16bac9acbc0acda39cd6100693ba71065490aca
# Dataset Card for "finetunedata_short" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Rageshhf/finetunedata_short
[ "region:us" ]
2023-10-14T17:27:45+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5318293, "num_examples": 3283}], "download_size": 1627953, "dataset_size": 5318293}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T17:30:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "finetunedata_short" More Information needed
[ "# Dataset Card for \"finetunedata_short\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"finetunedata_short\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"finetunedata_short\"\n\nMore Information needed" ]
a6de621b8137c16a85102cce3b56010b06e96380
# Dataset Card for "9f8a49b7" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/9f8a49b7
[ "region:us" ]
2023-10-14T18:04:21+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 235, "num_examples": 10}], "download_size": 1403, "dataset_size": 235}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T18:04:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "9f8a49b7" More Information needed
[ "# Dataset Card for \"9f8a49b7\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"9f8a49b7\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"9f8a49b7\"\n\nMore Information needed" ]
0aa1446856c60b57d66ec8fc3c71489cdd2ad374
# Dataset Card for "b745e329" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/b745e329
[ "region:us" ]
2023-10-14T18:04:24+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 235, "num_examples": 10}], "download_size": 1403, "dataset_size": 235}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T18:04:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "b745e329" More Information needed
[ "# Dataset Card for \"b745e329\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"b745e329\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"b745e329\"\n\nMore Information needed" ]
483119889f1734a095af311e88a0c509e3471836
# Dataset Card for Evaluation run of meta-llama/Llama-2-13b-chat-hf ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/meta-llama/Llama-2-13b-chat-hf - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-14T19:39:26.636545](https://huggingface.co/datasets/open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf/blob/main/results_2023-10-14T19-39-26.636545.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.1782718120805369, "em_stderr": 0.003919630092588375, "f1": 0.2387195889261742, "f1_stderr": 0.003944947017182046, "acc": 0.448727630233375, "acc_stderr": 0.011074189612085313 }, "harness|drop|3": { "em": 0.1782718120805369, "em_stderr": 0.003919630092588375, "f1": 0.2387195889261742, "f1_stderr": 0.003944947017182046 }, "harness|gsm8k|5": { "acc": 0.15238817285822592, "acc_stderr": 0.009899572254794204 }, "harness|winogrande|5": { "acc": 0.745067087608524, "acc_stderr": 0.012248806969376422 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf
[ "region:us" ]
2023-10-14T18:39:30+00:00
{"pretty_name": "Evaluation run of meta-llama/Llama-2-13b-chat-hf", "dataset_summary": "Dataset automatically created during the evaluation run of model [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-14T19:39:26.636545](https://huggingface.co/datasets/open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf/blob/main/results_2023-10-14T19-39-26.636545.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.1782718120805369,\n \"em_stderr\": 0.003919630092588375,\n \"f1\": 0.2387195889261742,\n \"f1_stderr\": 0.003944947017182046,\n \"acc\": 0.448727630233375,\n \"acc_stderr\": 0.011074189612085313\n },\n \"harness|drop|3\": {\n \"em\": 0.1782718120805369,\n \"em_stderr\": 0.003919630092588375,\n \"f1\": 0.2387195889261742,\n \"f1_stderr\": 0.003944947017182046\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.15238817285822592,\n \"acc_stderr\": 0.009899572254794204\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.745067087608524,\n \"acc_stderr\": 0.012248806969376422\n }\n}\n```", "repo_url": "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_14T19_39_26.636545", "path": ["**/details_harness|drop|3_2023-10-14T19-39-26.636545.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-14T19-39-26.636545.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_14T19_39_26.636545", "path": ["**/details_harness|gsm8k|5_2023-10-14T19-39-26.636545.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-14T19-39-26.636545.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_14T19_39_26.636545", "path": ["**/details_harness|winogrande|5_2023-10-14T19-39-26.636545.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-14T19-39-26.636545.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_14T19_39_26.636545", "path": ["results_2023-10-14T19-39-26.636545.parquet"]}, {"split": "latest", "path": ["results_2023-10-14T19-39-26.636545.parquet"]}]}]}
2023-10-14T18:39:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of meta-llama/Llama-2-13b-chat-hf ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model meta-llama/Llama-2-13b-chat-hf on the Open LLM Leaderboard. The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-14T19:39:26.636545 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
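The corresponding loading call for this run's details, again using the `harness_winogrande_5` configuration:

```python
from datasets import load_dataset

data = load_dataset(
    "open-llm-leaderboard/details_meta-llama__Llama-2-13b-chat-hf",
    "harness_winogrande_5",
    split="train",
)
```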
[ "# Dataset Card for Evaluation run of meta-llama/Llama-2-13b-chat-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-13b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T19:39:26.636545(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of meta-llama/Llama-2-13b-chat-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-13b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T19:39:26.636545(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 24, 31, 172, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of meta-llama/Llama-2-13b-chat-hf## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-13b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-14T19:39:26.636545(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
4290a4c1a3804bfc386043ba70314a996c2e108a
This is the KJV New Testament in JSON, grouped by pericope.
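A minimal sketch of one way to read the file; the filename below is an assumption (check the repository's file listing), and the record structure is printed rather than assumed, since the card does not document the schema:

```python
import json
from huggingface_hub import hf_hub_download

# Filename is a hypothetical placeholder -- replace with the actual file in the repo.
path = hf_hub_download(
    repo_id="JWBickel/NewTestament_Pericopes",
    filename="pericopes.json",
    repo_type="dataset",
)

with open(path, encoding="utf-8") as f:
    pericopes = json.load(f)

# Print a few entries to discover the actual pericope structure.
for entry in list(pericopes)[:3]:
    print(entry)
```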
JWBickel/NewTestament_Pericopes
[ "size_categories:1K<n<10K", "language:en", "KJV Bible New Testament NT Pericope", "region:us" ]
2023-10-14T19:33:16+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "pretty_name": "KJV NT by Pericope", "tags": ["KJV Bible New Testament NT Pericope"]}
2023-11-12T15:49:06+00:00
[]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #KJV Bible New Testament NT Pericope #region-us
This is the KJV New Testament in JSON, grouped by pericope.
[]
[ "TAGS\n#size_categories-1K<n<10K #language-English #KJV Bible New Testament NT Pericope #region-us \n" ]
[ 33 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #KJV Bible New Testament NT Pericope #region-us \n" ]
fc3e7512174d1a71df86dc759b13f29d396799ec
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
omarc/partial-asr
[ "task_categories:automatic-speech-recognition", "size_categories:10K<n<100K", "language:en", "license:mit", "partial-audio-transcripts", "automatic-speech-transcription", "whipser-small.en", "region:us" ]
2023-10-14T19:33:44+00:00
{"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["automatic-speech-recognition"], "pretty_name": "Partially Removed 3-Best ASR Trancripts", "tags": ["partial-audio-transcripts", "automatic-speech-transcription", "whipser-small.en"]}
2023-10-15T18:15:21+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #size_categories-10K<n<100K #language-English #license-mit #partial-audio-transcripts #automatic-speech-transcription #whipser-small.en #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #size_categories-10K<n<100K #language-English #license-mit #partial-audio-transcripts #automatic-speech-transcription #whipser-small.en #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 71, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #size_categories-10K<n<100K #language-English #license-mit #partial-audio-transcripts #automatic-speech-transcription #whipser-small.en #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
6bbe7fb22934e321910259bd4895de645089cd0d
# Dataset Card for "dataverse_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fia24/dataverse_dataset
[ "region:us" ]
2023-10-14T20:39:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "translation", "struct": [{"name": "en", "dtype": "string"}, {"name": "fr", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 6289213, "num_examples": 19799}, {"name": "test", "num_bytes": 696310, "num_examples": 2200}], "download_size": 3524483, "dataset_size": 6985523}}
2023-10-14T20:39:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataverse_dataset" More Information needed
[ "# Dataset Card for \"dataverse_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataverse_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataverse_dataset\"\n\nMore Information needed" ]
5ab9781071227a205bdfeb405dc5b13a2465ecae
# Dataset Card for Evaluation run of cerebras/Cerebras-GPT-590M ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/cerebras/Cerebras-GPT-590M - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [cerebras/Cerebras-GPT-590M](https://huggingface.co/cerebras/Cerebras-GPT-590M) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-14T22:11:07.408754](https://huggingface.co/datasets/open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M/blob/main/results_2023-10-14T22-11-07.408754.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.001153523489932886, "em_stderr": 0.00034761798968571054, "f1": 0.039916107382550345, "f1_stderr": 0.001153929680724628, "acc": 0.24300057504519282, "acc_stderr": 0.007948184376446 }, "harness|drop|3": { "em": 0.001153523489932886, "em_stderr": 0.00034761798968571054, "f1": 0.039916107382550345, "f1_stderr": 0.001153929680724628 }, "harness|gsm8k|5": { "acc": 0.004548900682335102, "acc_stderr": 0.0018535550440036204 }, "harness|winogrande|5": { "acc": 0.48145224940805054, "acc_stderr": 0.014042813708888378 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M
[ "region:us" ]
2023-10-14T21:11:11+00:00
{"pretty_name": "Evaluation run of cerebras/Cerebras-GPT-590M", "dataset_summary": "Dataset automatically created during the evaluation run of model [cerebras/Cerebras-GPT-590M](https://huggingface.co/cerebras/Cerebras-GPT-590M) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-14T22:11:07.408754](https://huggingface.co/datasets/open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M/blob/main/results_2023-10-14T22-11-07.408754.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.001153523489932886,\n \"em_stderr\": 0.00034761798968571054,\n \"f1\": 0.039916107382550345,\n \"f1_stderr\": 0.001153929680724628,\n \"acc\": 0.24300057504519282,\n \"acc_stderr\": 0.007948184376446\n },\n \"harness|drop|3\": {\n \"em\": 0.001153523489932886,\n \"em_stderr\": 0.00034761798968571054,\n \"f1\": 0.039916107382550345,\n \"f1_stderr\": 0.001153929680724628\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.004548900682335102,\n \"acc_stderr\": 0.0018535550440036204\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.48145224940805054,\n \"acc_stderr\": 0.014042813708888378\n }\n}\n```", "repo_url": "https://huggingface.co/cerebras/Cerebras-GPT-590M", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_14T22_11_07.408754", "path": ["**/details_harness|drop|3_2023-10-14T22-11-07.408754.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-14T22-11-07.408754.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_14T22_11_07.408754", "path": ["**/details_harness|gsm8k|5_2023-10-14T22-11-07.408754.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-14T22-11-07.408754.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_14T22_11_07.408754", "path": ["**/details_harness|winogrande|5_2023-10-14T22-11-07.408754.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-14T22-11-07.408754.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_14T22_11_07.408754", "path": ["results_2023-10-14T22-11-07.408754.parquet"]}, {"split": "latest", "path": ["results_2023-10-14T22-11-07.408754.parquet"]}]}]}
2023-10-14T21:11:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of cerebras/Cerebras-GPT-590M ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model cerebras/Cerebras-GPT-590M on the Open LLM Leaderboard. The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-14T22:11:07.408754 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
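The corresponding loading call for this run's details, again using the `harness_winogrande_5` configuration:

```python
from datasets import load_dataset

data = load_dataset(
    "open-llm-leaderboard/details_cerebras__Cerebras-GPT-590M",
    "harness_winogrande_5",
    split="train",
)
```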
[ "# Dataset Card for Evaluation run of cerebras/Cerebras-GPT-590M", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model cerebras/Cerebras-GPT-590M on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T22:11:07.408754(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of cerebras/Cerebras-GPT-590M", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model cerebras/Cerebras-GPT-590M on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-14T22:11:07.408754(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 21, 31, 169, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of cerebras/Cerebras-GPT-590M## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model cerebras/Cerebras-GPT-590M on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-14T22:11:07.408754(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
3971ce8e79bd50a309a89fdbbb7fd6b9bef18226
# Dataset Card for "search" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
TheBossLevel123/search
[ "region:us" ]
2023-10-14T21:45:44+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8048.0, "num_examples": 152}, {"name": "test", "num_bytes": 2012.0, "num_examples": 38}], "download_size": 8220, "dataset_size": 10060.0}}
2023-10-14T21:45:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "search" More Information needed
[ "# Dataset Card for \"search\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"search\"\n\nMore Information needed" ]
[ 6, 11 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"search\"\n\nMore Information needed" ]
878619ba5a55ff2c5d9e3dfcd14d02897ab4f2d8
# Dataset Card for "gpt2-augmentation1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mmmurf/gpt2-augmentation1
[ "region:us" ]
2023-10-14T22:01:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 260, "num_examples": 4}, {"name": "validation", "num_bytes": 261, "num_examples": 4}], "download_size": 2470, "dataset_size": 521}}
2023-10-14T22:01:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gpt2-augmentation1" More Information needed
[ "# Dataset Card for \"gpt2-augmentation1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gpt2-augmentation1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gpt2-augmentation1\"\n\nMore Information needed" ]
cd5965848e884b9da5ca19f58612fa6b9420b2e2
This is a collection of abc notation 'tunes' in the format of ``` {"text": "tune data"} ```
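As an illustration of the stated format, each record is a JSON object with a single `text` field holding a tune; the tune below is a made-up example, not taken from the dataset:

```python
import json

# One hypothetical record in the {"text": ...} format described above.
line = '{"text": "X:1\\nT:Example Tune\\nM:4/4\\nL:1/8\\nK:G\\nGABc d2e2|d2cB A4|]"}'

record = json.loads(line)
print(record["text"])  # prints the abc notation tune
```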
cvinker/abcnotation_examples
[ "region:us" ]
2023-10-14T22:32:31+00:00
{}
2023-10-14T22:35:39+00:00
[]
[]
TAGS #region-us
This is a collection of abc notation 'tunes' in the format of
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
ad9aeefcec3d04163425918da95e5d349282ea09
# Dataset Card for Evaluation run of edor/Platypus2-mini-7B ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/edor/Platypus2-mini-7B - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [edor/Platypus2-mini-7B](https://huggingface.co/edor/Platypus2-mini-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_edor__Platypus2-mini-7B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T00:02:17.687247](https://huggingface.co/datasets/open-llm-leaderboard/details_edor__Platypus2-mini-7B/blob/main/results_2023-10-15T00-02-17.687247.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.037856543624161076, "em_stderr": 0.0019544746699158705, "f1": 0.09621854026845604, "f1_stderr": 0.00226406080927082, "acc": 0.4067747623974298, "acc_stderr": 0.0093991751789674 }, "harness|drop|3": { "em": 0.037856543624161076, "em_stderr": 0.0019544746699158705, "f1": 0.09621854026845604, "f1_stderr": 0.00226406080927082 }, "harness|gsm8k|5": { "acc": 0.0621683093252464, "acc_stderr": 0.006651035644531692 }, "harness|winogrande|5": { "acc": 0.7513812154696132, "acc_stderr": 0.012147314713403108 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_edor__Platypus2-mini-7B
[ "region:us" ]
2023-10-14T23:02:21+00:00
{"pretty_name": "Evaluation run of edor/Platypus2-mini-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [edor/Platypus2-mini-7B](https://huggingface.co/edor/Platypus2-mini-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_edor__Platypus2-mini-7B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T00:02:17.687247](https://huggingface.co/datasets/open-llm-leaderboard/details_edor__Platypus2-mini-7B/blob/main/results_2023-10-15T00-02-17.687247.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.037856543624161076,\n \"em_stderr\": 0.0019544746699158705,\n \"f1\": 0.09621854026845604,\n \"f1_stderr\": 0.00226406080927082,\n \"acc\": 0.4067747623974298,\n \"acc_stderr\": 0.0093991751789674\n },\n \"harness|drop|3\": {\n \"em\": 0.037856543624161076,\n \"em_stderr\": 0.0019544746699158705,\n \"f1\": 0.09621854026845604,\n \"f1_stderr\": 0.00226406080927082\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0621683093252464,\n \"acc_stderr\": 0.006651035644531692\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7513812154696132,\n \"acc_stderr\": 0.012147314713403108\n }\n}\n```", "repo_url": "https://huggingface.co/edor/Platypus2-mini-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T00_02_17.687247", "path": ["**/details_harness|drop|3_2023-10-15T00-02-17.687247.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T00-02-17.687247.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T00_02_17.687247", "path": ["**/details_harness|gsm8k|5_2023-10-15T00-02-17.687247.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T00-02-17.687247.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T00_02_17.687247", "path": ["**/details_harness|winogrande|5_2023-10-15T00-02-17.687247.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T00-02-17.687247.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T00_02_17.687247", "path": ["results_2023-10-15T00-02-17.687247.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T00-02-17.687247.parquet"]}]}]}
2023-10-14T23:02:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of edor/Platypus2-mini-7B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model edor/Platypus2-mini-7B on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T00:02:17.687247(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of edor/Platypus2-mini-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model edor/Platypus2-mini-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T00:02:17.687247(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of edor/Platypus2-mini-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model edor/Platypus2-mini-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T00:02:17.687247(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 167, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of edor/Platypus2-mini-7B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model edor/Platypus2-mini-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T00:02:17.687247(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
79a5dc6500c231cfa94acdbfcad5dcd955545e40
# Dataset Card for "54b9ca8c" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/54b9ca8c
[ "region:us" ]
2023-10-14T23:28:11+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 167, "num_examples": 10}], "download_size": 1354, "dataset_size": 167}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-14T23:28:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "54b9ca8c" More Information needed
[ "# Dataset Card for \"54b9ca8c\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"54b9ca8c\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"54b9ca8c\"\n\nMore Information needed" ]
acb7f0ae9e247a8feb1f3a6d9e942a84b8df74a5
# Dataset Card for "light_illusion" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nbardy/light_illusion
[ "region:us" ]
2023-10-15T00:55:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39671500920.473, "num_examples": 14859}], "download_size": 43276966952, "dataset_size": 39671500920.473}}
2023-10-15T01:45:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "light_illusion" More Information needed
[ "# Dataset Card for \"light_illusion\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"light_illusion\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"light_illusion\"\n\nMore Information needed" ]
67e8bb669bdb183800783e441db401a5c6564606
# Dataset Card for "photo_geometric" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nbardy/photo_geometric
[ "region:us" ]
2023-10-15T01:13:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51470967349.009, "num_examples": 21381}], "download_size": 61304680550, "dataset_size": 51470967349.009}}
2023-10-15T02:16:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "photo_geometric" More Information needed
[ "# Dataset Card for \"photo_geometric\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"photo_geometric\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"photo_geometric\"\n\nMore Information needed" ]
814caa5d628a59ac882727b927308a64de591b85
# Dataset Card for "rbrt_eval_sur_lrg3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_eval_sur_lrg3
[ "region:us" ]
2023-10-15T01:32:50+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 13795820, "num_examples": 6970}], "download_size": 3884690, "dataset_size": 13795820}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T01:32:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_eval_sur_lrg3" More Information needed
[ "# Dataset Card for \"rbrt_eval_sur_lrg3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_eval_sur_lrg3\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_eval_sur_lrg3\"\n\nMore Information needed" ]
5230a2f206bc99f5bda7a25e8bfe69b5e4cbb999
# Dataset Card for Evaluation run of meta-llama/Llama-2-7b-chat-hf ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/meta-llama/Llama-2-7b-chat-hf - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_meta-llama__Llama-2-7b-chat-hf", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T02:34:15.484281](https://huggingface.co/datasets/open-llm-leaderboard/details_meta-llama__Llama-2-7b-chat-hf/blob/main/results_2023-10-15T02-34-15.484281.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.06763842281879194, "em_stderr": 0.0025717489509556085, "f1": 0.13085570469798627, "f1_stderr": 0.0028825856446422905, "acc": 0.39549166962367155, "acc_stderr": 0.009921949302668327 }, "harness|drop|3": { "em": 0.06763842281879194, "em_stderr": 0.0025717489509556085, "f1": 0.13085570469798627, "f1_stderr": 0.0028825856446422905 }, "harness|gsm8k|5": { "acc": 0.07354056103108415, "acc_stderr": 0.0071898357543652685 }, "harness|winogrande|5": { "acc": 0.7174427782162589, "acc_stderr": 0.012654062850971384 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_meta-llama__Llama-2-7b-chat-hf
[ "region:us" ]
2023-10-15T01:34:19+00:00
{"pretty_name": "Evaluation run of meta-llama/Llama-2-7b-chat-hf", "dataset_summary": "Dataset automatically created during the evaluation run of model [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_meta-llama__Llama-2-7b-chat-hf\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T02:34:15.484281](https://huggingface.co/datasets/open-llm-leaderboard/details_meta-llama__Llama-2-7b-chat-hf/blob/main/results_2023-10-15T02-34-15.484281.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.06763842281879194,\n \"em_stderr\": 0.0025717489509556085,\n \"f1\": 0.13085570469798627,\n \"f1_stderr\": 0.0028825856446422905,\n \"acc\": 0.39549166962367155,\n \"acc_stderr\": 0.009921949302668327\n },\n \"harness|drop|3\": {\n \"em\": 0.06763842281879194,\n \"em_stderr\": 0.0025717489509556085,\n \"f1\": 0.13085570469798627,\n \"f1_stderr\": 0.0028825856446422905\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.07354056103108415,\n \"acc_stderr\": 0.0071898357543652685\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7174427782162589,\n \"acc_stderr\": 0.012654062850971384\n }\n}\n```", "repo_url": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T02_34_15.484281", "path": ["**/details_harness|drop|3_2023-10-15T02-34-15.484281.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T02-34-15.484281.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T02_34_15.484281", "path": ["**/details_harness|gsm8k|5_2023-10-15T02-34-15.484281.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T02-34-15.484281.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T02_34_15.484281", "path": ["**/details_harness|winogrande|5_2023-10-15T02-34-15.484281.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T02-34-15.484281.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T02_34_15.484281", "path": ["results_2023-10-15T02-34-15.484281.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T02-34-15.484281.parquet"]}]}]}
2023-10-15T01:34:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of meta-llama/Llama-2-7b-chat-hf ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model meta-llama/Llama-2-7b-chat-hf on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T02:34:15.484281(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of meta-llama/Llama-2-7b-chat-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-7b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T02:34:15.484281(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of meta-llama/Llama-2-7b-chat-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-7b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T02:34:15.484281(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 24, 31, 172, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of meta-llama/Llama-2-7b-chat-hf## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model meta-llama/Llama-2-7b-chat-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T02:34:15.484281(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
abbddc29b2850a5ac2b15785be2a2ec9b46c7335
# Dataset Card for "rbrt_full_uda_large_ep5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_full_uda_large_ep5
[ "region:us" ]
2023-10-15T01:40:07+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1219081708, "num_examples": 824810}], "download_size": 422786339, "dataset_size": 1219081708}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T02:08:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_full_uda_large_ep5" More Information needed
[ "# Dataset Card for \"rbrt_full_uda_large_ep5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_full_uda_large_ep5\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_full_uda_large_ep5\"\n\nMore Information needed" ]
e52a777ae4c3a5da624860812f3b884cc0540d80
NOTICE: some of the games are mistakenly labeled with both the length and width columns set to 40; they are actually 30.
# maze
This dataset contains 350,000 mazes, representing over 39.29 billion moves.
Each maze is a 30x30 ASCII representation, with solutions derived using BFS.
It has two columns:
- 'Maze': a representation of the maze as a list of strings; the shape is 30*30
  - visual example
  <img src="https://cdn-uploads.huggingface.co/production/uploads/644b983f0fbe4830f192c4f5/BGplH40fK5wQzpofPocMK.png" alt="drawing" width="200"/>
- 'Path': the solution from the start point to the end point as a list of strings, where each item represents a position in the maze.
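As a quick illustration (not part of the original card), the sketch below shows one way to load the data and inspect a maze together with its solution. The `train` split name and the exact string encoding of each path entry are assumptions, since the card does not specify them.

```python
# Minimal sketch, assuming a "train" split exists and the columns are named
# 'Maze' and 'Path' as described above; the format of each path entry is an assumption.
from datasets import load_dataset

ds = load_dataset("laion/strategic_game_maze", split="train")

example = ds[0]
maze_rows = example["Maze"]   # list of strings, one per row of the 30x30 ASCII grid
solution = example["Path"]    # list of positions from the start point to the end point

print("\n".join(maze_rows))                       # draw the ASCII maze
print(f"solution length: {len(solution)} moves")  # number of steps in the BFS solution
print("start:", solution[0], "goal:", solution[-1])
```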
laion/strategic_game_maze
[ "license:cc-by-4.0", "region:us" ]
2023-10-15T01:44:07+00:00
{"license": "cc-by-4.0"}
2023-10-20T03:13:19+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
NOTICE: some of the game is mistakenly label as both length and width columns are 40, they are 30 actually. # maze This dataset contains 350,000 mazes, represents over 39.29 billion moves. Each maze is a 30x30 ASCII representation, with solutions derived using the BFS. It has two columns: - 'Maze': representation of maze in a list of URL is 30*30 - visual example <image src="URL alt="drawing" width="200"/> - 'Path': solution from start point to end point in a list of string, each item represent a position in the maze.
[ "# maze\nThis dataset contains 350,000 mazes, represents over 39.29 billion moves. \nEach maze is a 30x30 ASCII representation, with solutions derived using the BFS.\nIt has two columns:\n- 'Maze': representation of maze in a list of URL is 30*30\n - visual example\n <image src=\"URL alt=\"drawing\" width=\"200\"/>\n- 'Path': solution from start point to end point in a list of string, each item represent a position in the maze." ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# maze\nThis dataset contains 350,000 mazes, represents over 39.29 billion moves. \nEach maze is a 30x30 ASCII representation, with solutions derived using the BFS.\nIt has two columns:\n- 'Maze': representation of maze in a list of URL is 30*30\n - visual example\n <image src=\"URL alt=\"drawing\" width=\"200\"/>\n- 'Path': solution from start point to end point in a list of string, each item represent a position in the maze." ]
[ 15, 122 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# maze\nThis dataset contains 350,000 mazes, represents over 39.29 billion moves. \nEach maze is a 30x30 ASCII representation, with solutions derived using the BFS.\nIt has two columns:\n- 'Maze': representation of maze in a list of URL is 30*30\n - visual example\n <image src=\"URL alt=\"drawing\" width=\"200\"/>\n- 'Path': solution from start point to end point in a list of string, each item represent a position in the maze." ]
e8fb91e58db3ab970092ac7b351eddb2435e65d3
# Dataset Card for "rbrt_eval_sur_full_lrg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_eval_sur_full_lrg
[ "region:us" ]
2023-10-15T02:08:22+00:00
{"dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 58030544, "num_examples": 22480}], "download_size": 16743699, "dataset_size": 58030544}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T02:08:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_eval_sur_full_lrg" More Information needed
[ "# Dataset Card for \"rbrt_eval_sur_full_lrg\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_eval_sur_full_lrg\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_eval_sur_full_lrg\"\n\nMore Information needed" ]
178bdb6ef9f5a9e533d101d559eb06994f7b58b3
# Dataset Card for "squad-cmrc2018-zhtw" ## 資料集摘要 [CMRC 2018](https://hfl-rc.github.io/cmrc2018/) 是第二屆「訊飛盃」中文機器閱讀理解頒獎研討會(CMRC 2018)中相關競賽所使用的資料集。 它主要用於中文機器閱讀理解的跨度提取資料集,以增加該領域的語言多樣性。該資料集由人類專家在維基百科段落上註釋的近 20,000 個真實問題組成。 同時它也註釋了一個挑戰集,其中包含需要在整個上下文中進行全面理解和多句推理的問題。 原始資料來源: - https://hfl-rc.github.io/cmrc2018/ - https://github.com/ymcui/cmrc2018 ## 資料下載清理 1. 下載 [cmrc2018](https://huggingface.co/datasets/cmrc2018) 資料集 2. 使用 [OpenCC](https://github.com/yichen0831/opencc-python) 來進行簡繁轉換 3. 使用 Python 正規表示式來清理一些殘留在 `context`, `question`, `answer` 的不必要字元 4. 根據 `answers.text` 來重新計算 `answers.answer_start` 的字元位置 5. 使用 Huggingface Datasets 來上傳至 Huggingface Hub ## 資料集結構 範例如下: ``` { "id":"DEV_1889_QUERY_0", "context":"巴士底廣場是法國首都巴黎的一個廣場是法國大革命的重要紀念地方。過去是巴士底獄所在地直到攻佔巴士底獄隨後在法國革命期間的1789年7月14日到1790年7月14日之間被徹底破壞沒有留下任何痕跡。這個廣場跨巴黎市的3個區:第四區、第十一區和第十二區。這個廣場和周邊地區簡稱為“巴士底”。立於廣場中心的七月圓柱由路易-菲利普一世興建於1833年到1840年是為了紀念1830年的七月革命。其他顯著的特徵包括巴士底歌劇院、巴士底地鐵站以及一段聖馬丁運河。在1984年以前歌劇院所在的地方曾經是巴士底火車站。這個廣場經常舉辦音樂會或類似活動。巴士底的東北部擁有許多咖啡館、酒吧、夜總會和音樂廳夜生活頗為熱鬧。由於這個廣場具有相當的歷史意義也經常用於政治示威包括大規模的2006年3月28日法國勞工抗議。在巴士底廣場交匯的道路有聖安託萬路、聖安託萬市郊路、亨利四世大道、里昂路、勒努瓦大道、博馬舍大道等。", "question":"巴士底廣場是哪場革命的重要紀念地方?", "answers":{ "text":[ "法國大革命" ], "answer_start":[ 18 ] } } ``` ## 資料欄位 所有配置(Split)的資料欄位都是相同的: - `id`: (string) 編號 - `context`: (string) 問題內容的上下文 - `question`: (string) 問題 - `answers`: 問題回答(基於內容的上下文來提取), 在SQuAD的結構裡, `text` 與 `answer_start` 是一個 list 列表 - `text`: list(string) 問題的答案 - `answer_start`: list(int) 問題的答案位於 `context` 上下文中的位置 ## 資料分割 這個資料集總有下列的分割(split)子集: - `train`: 10,142 筆 - `test`: 1,002 筆 - `validation`: 3,219 筆 ## 如何使用 ```python from datasets import load_dataset # 請使用 `split="train"` 參數來指定要使用的分割(split) dataset = load_dataset("erhwenkuo/squad-cmrc2018-zhtw", split="train") ``` 詳細的教學可參考: - [NLP 課程-問答系統](https://huggingface.co/learn/nlp-course/zh-TW/chapter7/7?fw=pt) ## 許可資訊 CC BY-SA 4.0 ## 論文引用 ``` @inproceedings{cui-emnlp2019-cmrc2018, title = "A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension", author = "Cui, Yiming and Liu, Ting and Che, Wanxiang and Xiao, Li and Chen, Zhipeng and Ma, Wentao and Wang, Shijin and Hu, Guoping", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/D19-1600", doi = "10.18653/v1/D19-1600", pages = "5886--5891", } ```
erhwenkuo/squad-cmrc2018-zhtw
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:zh", "license:cc-by-sa-4.0", "region:us" ]
2023-10-15T02:22:15+00:00
{"language": ["zh"], "license": "cc-by-sa-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 14839890, "num_examples": 10142}, {"name": "validation", "num_bytes": 4976411, "num_examples": 3219}, {"name": "test", "num_bytes": 1534360, "num_examples": 1002}], "download_size": 4781898, "dataset_size": 21350661}}
2023-10-15T03:52:32+00:00
[]
[ "zh" ]
TAGS #task_categories-question-answering #size_categories-10K<n<100K #language-Chinese #license-cc-by-sa-4.0 #region-us
# Dataset Card for "squad-cmrc2018-zhtw" ## 資料集摘要 CMRC 2018 是第二屆「訊飛盃」中文機器閱讀理解頒獎研討會(CMRC 2018)中相關競賽所使用的資料集。 它主要用於中文機器閱讀理解的跨度提取資料集,以增加該領域的語言多樣性。該資料集由人類專家在維基百科段落上註釋的近 20,000 個真實問題組成。 同時它也註釋了一個挑戰集,其中包含需要在整個上下文中進行全面理解和多句推理的問題。 原始資料來源: - URL - URL ## 資料下載清理 1. 下載 cmrc2018 資料集 2. 使用 OpenCC 來進行簡繁轉換 3. 使用 Python 正規表示式來清理一些殘留在 'context', 'question', 'answer' 的不必要字元 4. 根據 'URL' 來重新計算 'answers.answer_start' 的字元位置 5. 使用 Huggingface Datasets 來上傳至 Huggingface Hub ## 資料集結構 範例如下: ## 資料欄位 所有配置(Split)的資料欄位都是相同的: - 'id': (string) 編號 - 'context': (string) 問題內容的上下文 - 'question': (string) 問題 - 'answers': 問題回答(基於內容的上下文來提取), 在SQuAD的結構裡, 'text' 與 'answer_start' 是一個 list 列表 - 'text': list(string) 問題的答案 - 'answer_start': list(int) 問題的答案位於 'context' 上下文中的位置 ## 資料分割 這個資料集總有下列的分割(split)子集: - 'train': 10,142 筆 - 'test': 1,002 筆 - 'validation': 3,219 筆 ## 如何使用 詳細的教學可參考: - NLP 課程-問答系統 ## 許可資訊 CC BY-SA 4.0 ## 論文引用
[ "# Dataset Card for \"squad-cmrc2018-zhtw\"", "## 資料集摘要\n\nCMRC 2018 是第二屆「訊飛盃」中文機器閱讀理解頒獎研討會(CMRC 2018)中相關競賽所使用的資料集。\n\n它主要用於中文機器閱讀理解的跨度提取資料集,以增加該領域的語言多樣性。該資料集由人類專家在維基百科段落上註釋的近 20,000 個真實問題組成。\n\n同時它也註釋了一個挑戰集,其中包含需要在整個上下文中進行全面理解和多句推理的問題。\n\n原始資料來源:\n- URL\n- URL", "## 資料下載清理\n\n1. 下載 cmrc2018 資料集\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Python 正規表示式來清理一些殘留在 'context', 'question', 'answer' 的不必要字元\n4. 根據 'URL' 來重新計算 'answers.answer_start' 的字元位置\n5. 使用 Huggingface Datasets 來上傳至 Huggingface Hub", "## 資料集結構\n\n範例如下:", "## 資料欄位\n\n所有配置(Split)的資料欄位都是相同的:\n\n- 'id': (string) 編號\n- 'context': (string) 問題內容的上下文\n- 'question': (string) 問題\n- 'answers': 問題回答(基於內容的上下文來提取), 在SQuAD的結構裡, 'text' 與 'answer_start' 是一個 list 列表\n - 'text': list(string) 問題的答案\n - 'answer_start': list(int) 問題的答案位於 'context' 上下文中的位置", "## 資料分割\n\n這個資料集總有下列的分割(split)子集:\n\n- 'train': 10,142 筆\n- 'test': 1,002 筆\n- 'validation': 3,219 筆", "## 如何使用\n\n\n\n詳細的教學可參考:\n- NLP 課程-問答系統", "## 許可資訊\n\nCC BY-SA 4.0", "## 論文引用" ]
[ "TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-Chinese #license-cc-by-sa-4.0 #region-us \n", "# Dataset Card for \"squad-cmrc2018-zhtw\"", "## 資料集摘要\n\nCMRC 2018 是第二屆「訊飛盃」中文機器閱讀理解頒獎研討會(CMRC 2018)中相關競賽所使用的資料集。\n\n它主要用於中文機器閱讀理解的跨度提取資料集,以增加該領域的語言多樣性。該資料集由人類專家在維基百科段落上註釋的近 20,000 個真實問題組成。\n\n同時它也註釋了一個挑戰集,其中包含需要在整個上下文中進行全面理解和多句推理的問題。\n\n原始資料來源:\n- URL\n- URL", "## 資料下載清理\n\n1. 下載 cmrc2018 資料集\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Python 正規表示式來清理一些殘留在 'context', 'question', 'answer' 的不必要字元\n4. 根據 'URL' 來重新計算 'answers.answer_start' 的字元位置\n5. 使用 Huggingface Datasets 來上傳至 Huggingface Hub", "## 資料集結構\n\n範例如下:", "## 資料欄位\n\n所有配置(Split)的資料欄位都是相同的:\n\n- 'id': (string) 編號\n- 'context': (string) 問題內容的上下文\n- 'question': (string) 問題\n- 'answers': 問題回答(基於內容的上下文來提取), 在SQuAD的結構裡, 'text' 與 'answer_start' 是一個 list 列表\n - 'text': list(string) 問題的答案\n - 'answer_start': list(int) 問題的答案位於 'context' 上下文中的位置", "## 資料分割\n\n這個資料集總有下列的分割(split)子集:\n\n- 'train': 10,142 筆\n- 'test': 1,002 筆\n- 'validation': 3,219 筆", "## 如何使用\n\n\n\n詳細的教學可參考:\n- NLP 課程-問答系統", "## 許可資訊\n\nCC BY-SA 4.0", "## 論文引用" ]
[ 46, 16, 123, 97, 10, 137, 52, 20, 9, 4 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-Chinese #license-cc-by-sa-4.0 #region-us \n# Dataset Card for \"squad-cmrc2018-zhtw\"## 資料集摘要\n\nCMRC 2018 是第二屆「訊飛盃」中文機器閱讀理解頒獎研討會(CMRC 2018)中相關競賽所使用的資料集。\n\n它主要用於中文機器閱讀理解的跨度提取資料集,以增加該領域的語言多樣性。該資料集由人類專家在維基百科段落上註釋的近 20,000 個真實問題組成。\n\n同時它也註釋了一個挑戰集,其中包含需要在整個上下文中進行全面理解和多句推理的問題。\n\n原始資料來源:\n- URL\n- URL## 資料下載清理\n\n1. 下載 cmrc2018 資料集\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Python 正規表示式來清理一些殘留在 'context', 'question', 'answer' 的不必要字元\n4. 根據 'URL' 來重新計算 'answers.answer_start' 的字元位置\n5. 使用 Huggingface Datasets 來上傳至 Huggingface Hub## 資料集結構\n\n範例如下:## 資料欄位\n\n所有配置(Split)的資料欄位都是相同的:\n\n- 'id': (string) 編號\n- 'context': (string) 問題內容的上下文\n- 'question': (string) 問題\n- 'answers': 問題回答(基於內容的上下文來提取), 在SQuAD的結構裡, 'text' 與 'answer_start' 是一個 list 列表\n - 'text': list(string) 問題的答案\n - 'answer_start': list(int) 問題的答案位於 'context' 上下文中的位置## 資料分割\n\n這個資料集總有下列的分割(split)子集:\n\n- 'train': 10,142 筆\n- 'test': 1,002 筆\n- 'validation': 3,219 筆## 如何使用\n\n\n\n詳細的教學可參考:\n- NLP 課程-問答系統" ]
bdd7c94cb8ac7bfea411a5f78bbe74688d54098f
# Dataset Card for "train_data_1000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_1000
[ "region:us" ]
2023-10-15T03:16:09+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 168512774.90163255, "num_examples": 1000}, {"name": "test", "num_bytes": 33702458.98032651, "num_examples": 200}], "download_size": 191731422, "dataset_size": 202215233.88195905}}
2023-10-15T03:16:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_1000" More Information needed
[ "# Dataset Card for \"train_data_1000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_1000\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_1000\"\n\nMore Information needed" ]
173ac5ae63454155561d3790bd1ed49309af17cd
# Dataset Card for "train_data_5000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_5000
[ "region:us" ]
2023-10-15T03:16:17+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 842563984.5081627, "num_examples": 5000}, {"name": "test", "num_bytes": 33702427.98032651, "num_examples": 200}], "download_size": 859088007, "dataset_size": 876266412.4884893}}
2023-10-15T03:16:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_5000" More Information needed
[ "# Dataset Card for \"train_data_5000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_5000\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_5000\"\n\nMore Information needed" ]
90f0b0a773a3d48001771c243b30032ebe7f3ca4
# Dataset Card for "train_data_10000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_10000
[ "region:us" ]
2023-10-15T03:16:50+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1685125759.0163252, "num_examples": 10000}, {"name": "test", "num_bytes": 33702518.98032651, "num_examples": 200}], "download_size": 1671802370, "dataset_size": 1718828277.9966516}}
2023-10-15T03:17:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_10000" More Information needed
[ "# Dataset Card for \"train_data_10000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_10000\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_10000\"\n\nMore Information needed" ]
034ec09859ae74f03f2b6baeadf1e984031a2272
# Dataset Card for "train_data_15000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_15000
[ "region:us" ]
2023-10-15T03:17:41+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2527685083.524488, "num_examples": 15000}, {"name": "test", "num_bytes": 33702566.98032651, "num_examples": 200}], "download_size": 2525375368, "dataset_size": 2561387650.5048146}}
2023-10-15T03:18:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_15000" More Information needed
[ "# Dataset Card for \"train_data_15000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_15000\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_15000\"\n\nMore Information needed" ]
5c7111bbae656a8d758dbf3fd2cd7abd0bd7fb90
# Dataset Card for "train_data_20000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_20000
[ "region:us" ]
2023-10-15T03:18:56+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3370249038.032651, "num_examples": 20000}, {"name": "test", "num_bytes": 33702564.98032651, "num_examples": 200}], "download_size": 3324093596, "dataset_size": 3403951603.0129776}}
2023-10-15T03:20:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_20000" More Information needed
[ "# Dataset Card for \"train_data_20000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_20000\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_20000\"\n\nMore Information needed" ]
ef7b9eb0a0c47da2baeebf53089c052247c6d16a
# Dataset Card for "train_data_25000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_25000
[ "region:us" ]
2023-10-15T03:20:33+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4212813572.5408134, "num_examples": 25000}, {"name": "test", "num_bytes": 33702421.98032651, "num_examples": 200}], "download_size": 4159760175, "dataset_size": 4246515994.52114}}
2023-10-15T03:22:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_25000" More Information needed
[ "# Dataset Card for \"train_data_25000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_25000\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_25000\"\n\nMore Information needed" ]
9013abd7766d77d18424625d0dc16ce055321e32
# Dataset Card for "train_data_30000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/train_data_30000
[ "region:us" ]
2023-10-15T03:22:33+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5055383607.048976, "num_examples": 30000}, {"name": "test", "num_bytes": 33702525.98032651, "num_examples": 200}], "download_size": 4975038674, "dataset_size": 5089086133.029303}}
2023-10-15T03:25:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "train_data_30000" More Information needed
[ "# Dataset Card for \"train_data_30000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"train_data_30000\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"train_data_30000\"\n\nMore Information needed" ]
8a14ac6ec316b710d332fee7e966e693ee9f79da
# Dataset Card for "test_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adityarra07/test_data
[ "region:us" ]
2023-10-15T03:25:05+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 133411975.96105696, "num_examples": 1001}], "download_size": 134756772, "dataset_size": 133411975.96105696}}
2023-10-19T10:45:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_data" More Information needed
[ "# Dataset Card for \"test_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_data\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_data\"\n\nMore Information needed" ]
f437731f30d259e30e7812296f09ffdd2d2eaee4
# Dataset Card for "519c571e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/519c571e
[ "region:us" ]
2023-10-15T03:31:59+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 210, "num_examples": 10}], "download_size": 1378, "dataset_size": 210}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T03:32:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "519c571e" More Information needed
[ "# Dataset Card for \"519c571e\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"519c571e\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"519c571e\"\n\nMore Information needed" ]
ec68ff608cba9e21b90197a97fb17485f1574f3d
# Big Five Personality Traits ## OCEAN * Openness * Conscientiousness * Extraversion * Agreeableness * Neuroticism
MTHR/OCEAN
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:mit", "psychology", "bigfive", "big5", "region:us" ]
2023-10-15T04:09:46+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "pretty_name": "Big Five Personality Traits", "tags": ["psychology", "bigfive", "big5"]}
2023-10-15T22:57:35+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #psychology #bigfive #big5 #region-us
# Big Five Personality Traits ## OCEAN * Openness * Conscientiousness * Extraversion * Agreeableness * Neuroticism
[ "# Big Five Personality Traits", "## OCEAN\n* Openness\n* Conscientiousness\n* Extraversion\n* Agreeableness\n* Neuroticism" ]
[ "TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #psychology #bigfive #big5 #region-us \n", "# Big Five Personality Traits", "## OCEAN\n* Openness\n* Conscientiousness\n* Extraversion\n* Agreeableness\n* Neuroticism" ]
[ 50, 7, 25 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #psychology #bigfive #big5 #region-us \n# Big Five Personality Traits## OCEAN\n* Openness\n* Conscientiousness\n* Extraversion\n* Agreeableness\n* Neuroticism" ]
670321fa8d05f528722b0208609bcbf7b0b3c921
# Dataset Card for "textbook-codex-oai-0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
crumb/textbook-codex-oai-0
[ "region:us" ]
2023-10-15T04:10:55+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "src", "dtype": "string"}, {"name": "src_col", "dtype": "string"}, {"name": "model", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 100059225.10238275, "num_examples": 29265}], "download_size": 521517482, "dataset_size": 100059225.10238275}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T04:12:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "textbook-codex-oai-0" More Information needed
[ "# Dataset Card for \"textbook-codex-oai-0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"textbook-codex-oai-0\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"textbook-codex-oai-0\"\n\nMore Information needed" ]
c3f1339e970944075a2bcac652fc20453ce7cbe2
# Dataset Card for Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/beaugogh/Llama2-7b-openorca-mc-v1 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [beaugogh/Llama2-7b-openorca-mc-v1](https://huggingface.co/beaugogh/Llama2-7b-openorca-mc-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_beaugogh__Llama2-7b-openorca-mc-v1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T05:51:30.480988](https://huggingface.co/datasets/open-llm-leaderboard/details_beaugogh__Llama2-7b-openorca-mc-v1/blob/main/results_2023-10-15T05-51-30.480988.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.0012583892617449664, "em_stderr": 0.00036305608931189984, "f1": 0.054172609060402964, "f1_stderr": 0.0013304749578777586, "acc": 0.38787336798763505, "acc_stderr": 0.0089323131312436 }, "harness|drop|3": { "em": 0.0012583892617449664, "em_stderr": 0.00036305608931189984, "f1": 0.054172609060402964, "f1_stderr": 0.0013304749578777586 }, "harness|gsm8k|5": { "acc": 0.04094010614101592, "acc_stderr": 0.0054580767962943404 }, "harness|winogrande|5": { "acc": 0.7348066298342542, "acc_stderr": 0.01240654946619286 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_beaugogh__Llama2-7b-openorca-mc-v1
[ "region:us" ]
2023-10-15T04:51:34+00:00
{"pretty_name": "Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1", "dataset_summary": "Dataset automatically created during the evaluation run of model [beaugogh/Llama2-7b-openorca-mc-v1](https://huggingface.co/beaugogh/Llama2-7b-openorca-mc-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_beaugogh__Llama2-7b-openorca-mc-v1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T05:51:30.480988](https://huggingface.co/datasets/open-llm-leaderboard/details_beaugogh__Llama2-7b-openorca-mc-v1/blob/main/results_2023-10-15T05-51-30.480988.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.0012583892617449664,\n \"em_stderr\": 0.00036305608931189984,\n \"f1\": 0.054172609060402964,\n \"f1_stderr\": 0.0013304749578777586,\n \"acc\": 0.38787336798763505,\n \"acc_stderr\": 0.0089323131312436\n },\n \"harness|drop|3\": {\n \"em\": 0.0012583892617449664,\n \"em_stderr\": 0.00036305608931189984,\n \"f1\": 0.054172609060402964,\n \"f1_stderr\": 0.0013304749578777586\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.04094010614101592,\n \"acc_stderr\": 0.0054580767962943404\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7348066298342542,\n \"acc_stderr\": 0.01240654946619286\n }\n}\n```", "repo_url": "https://huggingface.co/beaugogh/Llama2-7b-openorca-mc-v1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T05_51_30.480988", "path": ["**/details_harness|drop|3_2023-10-15T05-51-30.480988.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T05-51-30.480988.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T05_51_30.480988", "path": ["**/details_harness|gsm8k|5_2023-10-15T05-51-30.480988.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T05-51-30.480988.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T05_51_30.480988", "path": ["**/details_harness|winogrande|5_2023-10-15T05-51-30.480988.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T05-51-30.480988.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T05_51_30.480988", "path": ["results_2023-10-15T05-51-30.480988.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T05-51-30.480988.parquet"]}]}]}
2023-10-15T04:51:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model beaugogh/Llama2-7b-openorca-mc-v1 on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T05:51:30.480988(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-7b-openorca-mc-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T05:51:30.480988(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-7b-openorca-mc-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T05:51:30.480988(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 28, 31, 176, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of beaugogh/Llama2-7b-openorca-mc-v1## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-7b-openorca-mc-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T05:51:30.480988(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
8091f0dfe7c27cc5b43134c7f79d42711b0d78f7
# Dataset Card for "character_prompts_arabic" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/character_prompts_arabic
[ "region:us" ]
2023-10-15T05:45:08+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5947578, "num_examples": 10000}], "download_size": 686117, "dataset_size": 5947578}}
2023-10-15T05:45:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "character_prompts_arabic" More Information needed
[ "# Dataset Card for \"character_prompts_arabic\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"character_prompts_arabic\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"character_prompts_arabic\"\n\nMore Information needed" ]
1998469e4e586f31e9ac2e4e6190ddf9b3bed26a
Faceset of the current president of Ukraine: 8480 aligned pictures (JPG) of his face from the latest UN meeting. https://cs.wikipedia.org/wiki/Volodymyr_Zelenskyj
Pampkinus/Volodymyr-Zelenskyj
[ "license:openrail", "region:us" ]
2023-10-15T05:48:28+00:00
{"license": "openrail"}
2023-10-15T06:16:18+00:00
[]
[]
TAGS #license-openrail #region-us
Faceset of the current president of Ukraine: 8480 aligned pictures (JPG) of his face from the latest UN meeting. URL
[]
[ "TAGS\n#license-openrail #region-us \n" ]
[ 12 ]
[ "passage: TAGS\n#license-openrail #region-us \n" ]
67a892f8239390d1f0d54a38e4417828a1dd89ee
# Dataset Card for "librispeech_ds1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hsali/librispeech_ds1
[ "region:us" ]
2023-10-15T05:48:30+00:00
{"dataset_info": {"features": [{"name": "data", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "emotion", "dtype": "null"}, {"name": "gender", "dtype": "string"}, {"name": "augmentation", "dtype": "string"}, {"name": "data_type", "dtype": "string"}, {"name": "session_id", "dtype": "null"}, {"name": "input_values", "sequence": "float32"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 568043983, "num_examples": 2001}], "download_size": 500409042, "dataset_size": 568043983}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T05:49:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_ds1" More Information needed
[ "# Dataset Card for \"librispeech_ds1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_ds1\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"librispeech_ds1\"\n\nMore Information needed" ]
d7bedfa60c66637a477e9a3ec4e8fff941c7e8ce
# Dataset Card for "Small_Alpaca_Instruct" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PiyushLavaniya/Small_Alpaca_Instruct
[ "region:us" ]
2023-10-15T06:00:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 17849270.7, "num_examples": 9000}, {"name": "test", "num_bytes": 1983252.3, "num_examples": 1000}], "download_size": 6069153, "dataset_size": 19832523.0}}
2023-10-15T06:00:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Small_Alpaca_Instruct" More Information needed
[ "# Dataset Card for \"Small_Alpaca_Instruct\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Small_Alpaca_Instruct\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Small_Alpaca_Instruct\"\n\nMore Information needed" ]
64b2feaae60da68314149e7201d61b8ff3ac1a3b
# Dataset Card for "sql" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mhenrichsen/sql
[ "region:us" ]
2023-10-15T06:32:44+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17385628, "num_examples": 78356}], "download_size": 7203703, "dataset_size": 17385628}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T06:32:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sql" More Information needed
[ "# Dataset Card for \"sql\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sql\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sql\"\n\nMore Information needed" ]
b1068fc5cc681729f1d6629acd83accfd38316f3
All of the models I made using my bulk RVC mangio model maker.
Prompt-Pirate/prompt-pirate-rvc-models
[ "region:us" ]
2023-10-15T06:45:30+00:00
{}
2023-10-15T21:22:37+00:00
[]
[]
TAGS #region-us
All of the models I made using my bulk RVC mangio model maker.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
ec0df5f48c4057da0d5c777f8b50d75b97bf12d3
# TyDiQA-GoldP-Th This dataset contains the Thai TyDiQA data extracted from [Khalidalt's TyDiQA Dataset](https://huggingface.co/datasets/khalidalt/tydiqa-goldp). This version applies the following additional preprocessing: 1. Convert byte-level answer indices into character-level indices 2. Fix any mismatch between the answer span and the actual text 3. Re-split the train/development sets so that no context passage leaks between them 4. Deduplicate questions from the same context passage ## Dataset Format The dataset is formatted to be compatible with the [XTREME benchmark](https://github.com/google-research/xtreme) format. The data follows this pattern: ```json { "version": "TyDiQA-GoldP-1.1-for-SQuAD-1.1", "data": [ { "paragraphs": [{ "context": [PASSAGE CONTEXT HERE], "qas": [{ "answers": [{ "answer_start": [CONTEXT START CHAR INDEX OF ANSWER], "text": [TEXT SPAN FROM CONTEXT], }], "question": [QUESTION], "id": [ID] }] }], }, ... ] } ``` ## Author Chompakorn Chaksangchaichot
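As a concrete illustration of the preprocessing above, here is a minimal sketch (not part of the original card; the file name is taken from this repo's data files, and standard SQuAD key names are assumed) showing what the byte-to-character conversion amounts to and checking that each character-level answer_start lines up with its context:

```python
import json

def byte_to_char_index(text: str, byte_index: int) -> int:
    # Step 1 above: remap TyDiQA's byte offsets to character offsets. This matters
    # for Thai, where most characters occupy 3 bytes in UTF-8.
    return len(text.encode("utf-8")[:byte_index].decode("utf-8", errors="ignore"))

# e.g. in "ภาษาไทย" the first 9 bytes cover exactly the first 3 characters
assert byte_to_char_index("ภาษาไทย", 9) == 3

# Validate that the character-level answer spans match the context (steps 1-2).
with open("tydiqa.goldp.th.train.json", encoding="utf-8") as f:
    squad_like = json.load(f)

for article in squad_like["data"]:
    for paragraph in article["paragraphs"]:
        context = paragraph["context"]
        for qa in paragraph["qas"]:
            for answer in qa["answers"]:
                start = answer["answer_start"]
                assert context[start:start + len(answer["text"])] == answer["text"]
```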
chompk/tydiqa-goldp-th
[ "task_categories:question-answering", "task_ids:extractive-qa", "language:th", "region:us" ]
2023-10-15T06:48:48+00:00
{"language": ["th"], "task_categories": ["question-answering"], "task_ids": ["extractive-qa"], "pretty_name": "TyDiQA-GoldP-Th", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "tydiqa.goldp.th.train.json"}, {"split": "dev", "path": "tydiqa.goldp.th.dev.json"}]}]}
2023-11-18T14:12:59+00:00
[]
[ "th" ]
TAGS #task_categories-question-answering #task_ids-extractive-qa #language-Thai #region-us
# TyDiQA-GoldP-Th This dataset contains the Thai TyDiQA data extracted from Khalidalt's TyDiQA Dataset. This version applies the following additional preprocessing: 1. Convert byte-level answer indices into character-level indices 2. Fix any mismatch between the answer span and the actual text 3. Re-split the train/development sets so that no context passage leaks between them 4. Deduplicate questions from the same context passage ## Dataset Format The dataset is formatted to be compatible with the XTREME benchmark format. The data follows this pattern: ## Author Chompakorn Chaksangchaichot
[ "# TyDiQA-GoldP-Th\nThis dataset contains a removed Thai TyDiQA dataset obtained from Khalidalt's TyDiQA Dataset.\nThis dataset version does the following additional preprocessing to the dataset\n1. Convert byte-level index into character-level index\n2. Fix any mismatch text between answer span and actual text\n3. Re-split train/development set such that there's no leakage in context passage\n4. Deduplicate questions from the same context passage", "## Dataset Format\nThe dataset is formatted to make it compatible to XTREME benchmark format. The data is formatted as the following pattern:", "## Author\nChompakorn Chaksangchaichot" ]
[ "TAGS\n#task_categories-question-answering #task_ids-extractive-qa #language-Thai #region-us \n", "# TyDiQA-GoldP-Th\nThis dataset contains a removed Thai TyDiQA dataset obtained from Khalidalt's TyDiQA Dataset.\nThis dataset version does the following additional preprocessing to the dataset\n1. Convert byte-level index into character-level index\n2. Fix any mismatch text between answer span and actual text\n3. Re-split train/development set such that there's no leakage in context passage\n4. Deduplicate questions from the same context passage", "## Dataset Format\nThe dataset is formatted to make it compatible to XTREME benchmark format. The data is formatted as the following pattern:", "## Author\nChompakorn Chaksangchaichot" ]
[ 34, 108, 31, 12 ]
[ "passage: TAGS\n#task_categories-question-answering #task_ids-extractive-qa #language-Thai #region-us \n# TyDiQA-GoldP-Th\nThis dataset contains a removed Thai TyDiQA dataset obtained from Khalidalt's TyDiQA Dataset.\nThis dataset version does the following additional preprocessing to the dataset\n1. Convert byte-level index into character-level index\n2. Fix any mismatch text between answer span and actual text\n3. Re-split train/development set such that there's no leakage in context passage\n4. Deduplicate questions from the same context passage## Dataset Format\nThe dataset is formatted to make it compatible to XTREME benchmark format. The data is formatted as the following pattern:## Author\nChompakorn Chaksangchaichot" ]
22dd56aea94f428cde556e34ece1e75897d2d7a8
# Dataset Card for "character_prompts_arabic_best" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/character_prompts_arabic_best
[ "region:us" ]
2023-10-15T07:01:49+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6877577, "num_examples": 10000}], "download_size": 1004618, "dataset_size": 6877577}}
2023-10-15T07:01:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "character_prompts_arabic_best" More Information needed
[ "# Dataset Card for \"character_prompts_arabic_best\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"character_prompts_arabic_best\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"character_prompts_arabic_best\"\n\nMore Information needed" ]
4fc6fc09601675b97d36bc41c29d660cda7347ee
# Dataset Card for "toolkit_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rehanbrr/toolkit_dataset
[ "region:us" ]
2023-10-15T07:03:03+00:00
{"dataset_info": {"features": [{"name": "doi", "dtype": "string"}, {"name": "chunk_id", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "chunk", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 139684, "num_examples": 80}], "download_size": 74668, "dataset_size": 139684}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T07:03:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "toolkit_dataset" More Information needed
[ "# Dataset Card for \"toolkit_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"toolkit_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"toolkit_dataset\"\n\nMore Information needed" ]
43247e9f99046471b9a0aa8649467764caeda98a
# Dataset Card for "chinese_fonts_single_128x128" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
poorguys/chinese_fonts_single_128x128
[ "region:us" ]
2023-10-15T07:04:30+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "char", "dtype": "string"}, {"name": "unicode", "dtype": "string"}, {"name": "font", "dtype": "string"}, {"name": "font_type", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 244349.0, "num_examples": 65}], "download_size": 239164, "dataset_size": 244349.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T07:04:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chinese_fonts_single_128x128" More Information needed
[ "# Dataset Card for \"chinese_fonts_single_128x128\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chinese_fonts_single_128x128\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"chinese_fonts_single_128x128\"\n\nMore Information needed" ]
5396decadd5392b0edddd104b4def30a53a30dc6
# Dataset Card for "sum_only_sft" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sayan1101/sum_only_sft
[ "region:us" ]
2023-10-15T07:24:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}, {"config_name": "sayan1101", "data_files": [{"split": "train", "path": "sayan1101/train-*"}, {"split": "test", "path": "sayan1101/test-*"}]}], "dataset_info": [{"config_name": "default", "features": [{"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1257397090, "num_examples": 287113}], "download_size": 739106590, "dataset_size": 1257397090}, {"config_name": "sayan1101", "features": [{"name": "prompt", "dtype": "string"}, {"name": "Instruction", "dtype": "string"}, {"name": "Response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 440245021.64652944, "num_examples": 50000}, {"name": "test", "num_bytes": 44024502.16465294, "num_examples": 5000}], "download_size": 294043268, "dataset_size": 484269523.8111824}]}
2023-10-16T09:54:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sum_only_sft" More Information needed
[ "# Dataset Card for \"sum_only_sft\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sum_only_sft\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sum_only_sft\"\n\nMore Information needed" ]
131faf6d2db0f5187b38340cd698635c68861aea
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.6.0.dev0
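The training procedure above only records the bitsandbytes settings, so as a hedged sketch (the base model id comes from this repo's metadata and the adapter id is this repo itself; both may need adjusting), reloading the adapter on an 8-bit quantized base would look roughly like this:

```python
# Hedged sketch, not from the original card: reload the PEFT adapter on top of
# an 8-bit quantized base model, mirroring the load_in_8bit=True setting above.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",  # base model per repo metadata (assumption)
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "GGital/CAI_ENG_NEW_01")  # this adapter repo
model.eval()
```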
GGital/CAI_ENG_NEW_01
[ "arxiv:1910.09700", "region:us" ]
2023-10-15T08:12:05+00:00
{"library_name": "peft", "base_model": "decapoda-research/llama-7b-hf"}
2023-10-15T08:12:30+00:00
[ "1910.09700" ]
[]
TAGS #arxiv-1910.09700 #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ## Training procedure The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.6.0.dev0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: True\n- load_in_4bit: False\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: fp4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float32", "### Framework versions\n\n\n- PEFT 0.6.0.dev0" ]
[ "TAGS\n#arxiv-1910.09700 #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: True\n- load_in_4bit: False\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: fp4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float32", "### Framework versions\n\n\n- PEFT 0.6.0.dev0" ]
[ 15, 6, 3, 45, 28, 3, 4, 9, 9, 10, 42, 20, 3, 4, 5, 9, 11, 13, 3, 12, 5, 4, 5, 3, 4, 9, 53, 9, 8, 6, 3, 14, 8, 7, 9, 4, 164, 15 ]
[ "passage: TAGS\n#arxiv-1910.09700 #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
8efa91b84dc029ad987c5413a50cb2c3a0d44db2
# Dataset Card for Evaluation run of beaugogh/Llama2-13b-sharegpt4 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/beaugogh/Llama2-13b-sharegpt4 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [beaugogh/Llama2-13b-sharegpt4](https://huggingface.co/beaugogh/Llama2-13b-sharegpt4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_beaugogh__Llama2-13b-sharegpt4", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T09:30:37.851108](https://huggingface.co/datasets/open-llm-leaderboard/details_beaugogh__Llama2-13b-sharegpt4/blob/main/results_2023-10-15T09-30-37.851108.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.001153523489932886, "em_stderr": 0.00034761798968571027, "f1": 0.05843015939597327, "f1_stderr": 0.0013137444686186492, "acc": 0.4200579473220307, "acc_stderr": 0.009967774108676528 }, "harness|drop|3": { "em": 0.001153523489932886, "em_stderr": 0.00034761798968571027, "f1": 0.05843015939597327, "f1_stderr": 0.0013137444686186492 }, "harness|gsm8k|5": { "acc": 0.08794541319181198, "acc_stderr": 0.007801162197487711 }, "harness|winogrande|5": { "acc": 0.7521704814522494, "acc_stderr": 0.012134386019865348 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_beaugogh__Llama2-13b-sharegpt4
[ "region:us" ]
2023-10-15T08:30:41+00:00
{"pretty_name": "Evaluation run of beaugogh/Llama2-13b-sharegpt4", "dataset_summary": "Dataset automatically created during the evaluation run of model [beaugogh/Llama2-13b-sharegpt4](https://huggingface.co/beaugogh/Llama2-13b-sharegpt4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_beaugogh__Llama2-13b-sharegpt4\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T09:30:37.851108](https://huggingface.co/datasets/open-llm-leaderboard/details_beaugogh__Llama2-13b-sharegpt4/blob/main/results_2023-10-15T09-30-37.851108.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.001153523489932886,\n \"em_stderr\": 0.00034761798968571027,\n \"f1\": 0.05843015939597327,\n \"f1_stderr\": 0.0013137444686186492,\n \"acc\": 0.4200579473220307,\n \"acc_stderr\": 0.009967774108676528\n },\n \"harness|drop|3\": {\n \"em\": 0.001153523489932886,\n \"em_stderr\": 0.00034761798968571027,\n \"f1\": 0.05843015939597327,\n \"f1_stderr\": 0.0013137444686186492\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.08794541319181198,\n \"acc_stderr\": 0.007801162197487711\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7521704814522494,\n \"acc_stderr\": 0.012134386019865348\n }\n}\n```", "repo_url": "https://huggingface.co/beaugogh/Llama2-13b-sharegpt4", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T09_30_37.851108", "path": ["**/details_harness|drop|3_2023-10-15T09-30-37.851108.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T09-30-37.851108.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T09_30_37.851108", "path": ["**/details_harness|gsm8k|5_2023-10-15T09-30-37.851108.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T09-30-37.851108.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T09_30_37.851108", "path": ["**/details_harness|winogrande|5_2023-10-15T09-30-37.851108.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T09-30-37.851108.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T09_30_37.851108", "path": ["results_2023-10-15T09-30-37.851108.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T09-30-37.851108.parquet"]}]}]}
2023-10-15T08:30:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of beaugogh/Llama2-13b-sharegpt4 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model beaugogh/Llama2-13b-sharegpt4 on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T09:30:37.851108(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of beaugogh/Llama2-13b-sharegpt4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-13b-sharegpt4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T09:30:37.851108(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of beaugogh/Llama2-13b-sharegpt4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-13b-sharegpt4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T09:30:37.851108(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 23, 31, 171, 68, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of beaugogh/Llama2-13b-sharegpt4## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model beaugogh/Llama2-13b-sharegpt4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T09:30:37.851108(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
4886d9736ebfae2c31d6f56c904659c42f9b8797
# Dataset Card for "test_dataset_sum" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sayan1101/test_dataset_sum
[ "region:us" ]
2023-10-15T08:45:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 49753382, "num_examples": 11490}], "download_size": 28642180, "dataset_size": 49753382}}
2023-10-15T08:53:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_dataset_sum" More Information needed
[ "# Dataset Card for \"test_dataset_sum\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_dataset_sum\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_dataset_sum\"\n\nMore Information needed" ]
09f39602301c53676097cd0b283f45cf31604ae1
# Dataset Card for "cyberwiki" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
khangmacon/cyberwiki
[ "region:us" ]
2023-10-15T09:20:48+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 344199475.0, "num_examples": 31170}], "download_size": 193770769, "dataset_size": 344199475.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-15T16:04:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cyberwiki" More Information needed
[ "# Dataset Card for \"cyberwiki\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cyberwiki\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cyberwiki\"\n\nMore Information needed" ]
c5e5d2156b3a3384f8871825d8e05c8d34b98c9e
# Dataset Card for Evaluation run of Yhyu13/chimera-inst-chat-13b-hf ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Yhyu13/chimera-inst-chat-13b-hf - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [Yhyu13/chimera-inst-chat-13b-hf](https://huggingface.co/Yhyu13/chimera-inst-chat-13b-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Yhyu13__chimera-inst-chat-13b-hf", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T10:30:32.183057](https://huggingface.co/datasets/open-llm-leaderboard/details_Yhyu13__chimera-inst-chat-13b-hf/blob/main/results_2023-10-15T10-30-32.183057.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.006606543624161074, "em_stderr": 0.0008296357389921881, "f1": 0.08297609060402691, "f1_stderr": 0.0018006483858768888, "acc": 0.4107112190060514, "acc_stderr": 0.009943586099857618 }, "harness|drop|3": { "em": 0.006606543624161074, "em_stderr": 0.0008296357389921881, "f1": 0.08297609060402691, "f1_stderr": 0.0018006483858768888 }, "harness|gsm8k|5": { "acc": 0.08188021228203184, "acc_stderr": 0.00755233852771695 }, "harness|winogrande|5": { "acc": 0.739542225730071, "acc_stderr": 0.012334833671998287 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_Yhyu13__chimera-inst-chat-13b-hf
[ "region:us" ]
2023-10-15T09:30:36+00:00
{"pretty_name": "Evaluation run of Yhyu13/chimera-inst-chat-13b-hf", "dataset_summary": "Dataset automatically created during the evaluation run of model [Yhyu13/chimera-inst-chat-13b-hf](https://huggingface.co/Yhyu13/chimera-inst-chat-13b-hf) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Yhyu13__chimera-inst-chat-13b-hf\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T10:30:32.183057](https://huggingface.co/datasets/open-llm-leaderboard/details_Yhyu13__chimera-inst-chat-13b-hf/blob/main/results_2023-10-15T10-30-32.183057.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.006606543624161074,\n \"em_stderr\": 0.0008296357389921881,\n \"f1\": 0.08297609060402691,\n \"f1_stderr\": 0.0018006483858768888,\n \"acc\": 0.4107112190060514,\n \"acc_stderr\": 0.009943586099857618\n },\n \"harness|drop|3\": {\n \"em\": 0.006606543624161074,\n \"em_stderr\": 0.0008296357389921881,\n \"f1\": 0.08297609060402691,\n \"f1_stderr\": 0.0018006483858768888\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.08188021228203184,\n \"acc_stderr\": 0.00755233852771695\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.739542225730071,\n \"acc_stderr\": 0.012334833671998287\n }\n}\n```", "repo_url": "https://huggingface.co/Yhyu13/chimera-inst-chat-13b-hf", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T10_30_32.183057", "path": ["**/details_harness|drop|3_2023-10-15T10-30-32.183057.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T10-30-32.183057.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T10_30_32.183057", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-30-32.183057.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-30-32.183057.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T10_30_32.183057", "path": ["**/details_harness|winogrande|5_2023-10-15T10-30-32.183057.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T10-30-32.183057.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T10_30_32.183057", "path": ["results_2023-10-15T10-30-32.183057.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T10-30-32.183057.parquet"]}]}]}
2023-10-15T09:30:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Yhyu13/chimera-inst-chat-13b-hf ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Yhyu13/chimera-inst-chat-13b-hf on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T10:30:32.183057(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of Yhyu13/chimera-inst-chat-13b-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yhyu13/chimera-inst-chat-13b-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:30:32.183057(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Yhyu13/chimera-inst-chat-13b-hf", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yhyu13/chimera-inst-chat-13b-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:30:32.183057(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 26, 31, 174, 68, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Yhyu13/chimera-inst-chat-13b-hf## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yhyu13/chimera-inst-chat-13b-hf on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T10:30:32.183057(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
fe9603ad54256783b75e46a1a13b74255b4ec3fa
# Dataset Card for Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4](https://huggingface.co/TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_TehVenom__DiffMerge_Pygmalion_Main-onto-V8P4", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T10:35:56.777835](https://huggingface.co/datasets/open-llm-leaderboard/details_TehVenom__DiffMerge_Pygmalion_Main-onto-V8P4/blob/main/results_2023-10-15T10-35-56.777835.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.002726510067114094, "em_stderr": 0.0005340111700415918, "f1": 0.05529781879194656, "f1_stderr": 0.0013448797167935412, "acc": 0.31823545497683364, "acc_stderr": 0.008263105361288367 }, "harness|drop|3": { "em": 0.002726510067114094, "em_stderr": 0.0005340111700415918, "f1": 0.05529781879194656, "f1_stderr": 0.0013448797167935412 }, "harness|gsm8k|5": { "acc": 0.011372251705837756, "acc_stderr": 0.002920666198788727 }, "harness|winogrande|5": { "acc": 0.6250986582478295, "acc_stderr": 0.013605544523788008 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_TehVenom__DiffMerge_Pygmalion_Main-onto-V8P4
[ "region:us" ]
2023-10-15T09:36:00+00:00
{"pretty_name": "Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", "dataset_summary": "Dataset automatically created during the evaluation run of model [TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4](https://huggingface.co/TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_TehVenom__DiffMerge_Pygmalion_Main-onto-V8P4\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T10:35:56.777835](https://huggingface.co/datasets/open-llm-leaderboard/details_TehVenom__DiffMerge_Pygmalion_Main-onto-V8P4/blob/main/results_2023-10-15T10-35-56.777835.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.002726510067114094,\n \"em_stderr\": 0.0005340111700415918,\n \"f1\": 0.05529781879194656,\n \"f1_stderr\": 0.0013448797167935412,\n \"acc\": 0.31823545497683364,\n \"acc_stderr\": 0.008263105361288367\n },\n \"harness|drop|3\": {\n \"em\": 0.002726510067114094,\n \"em_stderr\": 0.0005340111700415918,\n \"f1\": 0.05529781879194656,\n \"f1_stderr\": 0.0013448797167935412\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.011372251705837756,\n \"acc_stderr\": 0.002920666198788727\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6250986582478295,\n \"acc_stderr\": 0.013605544523788008\n }\n}\n```", "repo_url": "https://huggingface.co/TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T10_35_56.777835", "path": ["**/details_harness|drop|3_2023-10-15T10-35-56.777835.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T10-35-56.777835.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T10_35_56.777835", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-35-56.777835.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-35-56.777835.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T10_35_56.777835", "path": ["**/details_harness|winogrande|5_2023-10-15T10-35-56.777835.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T10-35-56.777835.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T10_35_56.777835", "path": ["results_2023-10-15T10-35-56.777835.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T10-35-56.777835.parquet"]}]}]}
2023-10-15T09:36:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T10:35:56.777835(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:35:56.777835(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:35:56.777835(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 31, 31, 179, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T10:35:56.777835(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
4d755dcb2b2cf536f007ea4e0a053c845cd3bb92
# Dataset Card for Evaluation run of stabilityai/StableBeluga2 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/stabilityai/StableBeluga2 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [stabilityai/StableBeluga2](https://huggingface.co/stabilityai/StableBeluga2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_stabilityai__StableBeluga2", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-15T10:41:03.838240](https://huggingface.co/datasets/open-llm-leaderboard/details_stabilityai__StableBeluga2/blob/main/results_2023-10-15T10-41-03.838240.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.4326761744966443, "em_stderr": 0.005073838660621812, "f1": 0.5027527265100691, "f1_stderr": 0.0048086605803724005, "acc": 0.5940617757706712, "acc_stderr": 0.01188966924347996 }, "harness|drop|3": { "em": 0.4326761744966443, "em_stderr": 0.005073838660621812, "f1": 0.5027527265100691, "f1_stderr": 0.0048086605803724005 }, "harness|gsm8k|5": { "acc": 0.35860500379075055, "acc_stderr": 0.013210317364134026 }, "harness|winogrande|5": { "acc": 0.829518547750592, "acc_stderr": 0.010569021122825897 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_stabilityai__StableBeluga2
[ "region:us" ]
2023-10-15T09:41:07+00:00
{"pretty_name": "Evaluation run of stabilityai/StableBeluga2", "dataset_summary": "Dataset automatically created during the evaluation run of model [stabilityai/StableBeluga2](https://huggingface.co/stabilityai/StableBeluga2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_stabilityai__StableBeluga2\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-15T10:41:03.838240](https://huggingface.co/datasets/open-llm-leaderboard/details_stabilityai__StableBeluga2/blob/main/results_2023-10-15T10-41-03.838240.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.4326761744966443,\n \"em_stderr\": 0.005073838660621812,\n \"f1\": 0.5027527265100691,\n \"f1_stderr\": 0.0048086605803724005,\n \"acc\": 0.5940617757706712,\n \"acc_stderr\": 0.01188966924347996\n },\n \"harness|drop|3\": {\n \"em\": 0.4326761744966443,\n \"em_stderr\": 0.005073838660621812,\n \"f1\": 0.5027527265100691,\n \"f1_stderr\": 0.0048086605803724005\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.35860500379075055,\n \"acc_stderr\": 0.013210317364134026\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.829518547750592,\n \"acc_stderr\": 0.010569021122825897\n }\n}\n```", "repo_url": "https://huggingface.co/stabilityai/StableBeluga2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_15T10_41_03.838240", "path": ["**/details_harness|drop|3_2023-10-15T10-41-03.838240.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-15T10-41-03.838240.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_15T10_41_03.838240", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-41-03.838240.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-15T10-41-03.838240.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_15T10_41_03.838240", "path": ["**/details_harness|winogrande|5_2023-10-15T10-41-03.838240.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-15T10-41-03.838240.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_15T10_41_03.838240", "path": ["results_2023-10-15T10-41-03.838240.parquet"]}, {"split": "latest", "path": ["results_2023-10-15T10-41-03.838240.parquet"]}]}]}
2023-10-15T09:41:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of stabilityai/StableBeluga2 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model stabilityai/StableBeluga2 on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-15T10:41:03.838240(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of stabilityai/StableBeluga2", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model stabilityai/StableBeluga2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:41:03.838240(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of stabilityai/StableBeluga2", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model stabilityai/StableBeluga2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-15T10:41:03.838240(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 18, 31, 166, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of stabilityai/StableBeluga2## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model stabilityai/StableBeluga2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-15T10:41:03.838240(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
4aba2d1ad9eefabdcc3b07a9b99d87c5af7a7c87
# Dataset Card for "rbrt_uda_lrg_ep5_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/rbrt_uda_lrg_ep5_2
[ "region:us" ]
2023-10-15T09:46:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "domain_label", "dtype": "int64"}, {"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1115662838, "num_examples": 755110}], "download_size": 352431197, "dataset_size": 1115662838}}
2023-10-15T09:47:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rbrt_uda_lrg_ep5_2" More Information needed
[ "# Dataset Card for \"rbrt_uda_lrg_ep5_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rbrt_uda_lrg_ep5_2\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rbrt_uda_lrg_ep5_2\"\n\nMore Information needed" ]
5b1ff3b1352266874d9f11e23c4a485baf6b6f16
# Dataset Card for "wikipedia-augmented-chunked-128" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
legacy107/wikipedia-augmented-chunked-128
[ "region:us" ]
2023-10-15T09:52:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "History", "sequence": "string"}, {"name": "QuAC_dialog_id", "dtype": "string"}, {"name": "Question", "dtype": "string"}, {"name": "Question_no", "dtype": "int64"}, {"name": "Rewrite", "dtype": "string"}, {"name": "true_page_title", "dtype": "string"}, {"name": "true_contexts", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "true_contexts_wiki", "dtype": "string"}, {"name": "extractive", "dtype": "bool"}, {"name": "retrieved_contexts", "sequence": "string"}, {"name": "chunked_article", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1589798324, "num_examples": 17183}, {"name": "test", "num_bytes": 282060219, "num_examples": 2882}], "download_size": 565470318, "dataset_size": 1871858543}}
2023-10-15T09:53:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia-augmented-chunked-128" More Information needed
[ "# Dataset Card for \"wikipedia-augmented-chunked-128\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia-augmented-chunked-128\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia-augmented-chunked-128\"\n\nMore Information needed" ]
2776c5b4439eb7752b81b9bedff64d16d7f2de33
# Dataset Card for "qa_wikipedia_augmented_sentence_transformer_negative_farming_128" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
legacy107/qa_wikipedia_augmented_sentence_transformer_negative_farming_128
[ "region:us" ]
2023-10-15T10:35:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "Question", "dtype": "string"}, {"name": "Question_no", "dtype": "int64"}, {"name": "Rewrite", "dtype": "string"}, {"name": "true_page_title", "dtype": "string"}, {"name": "negatives", "sequence": "string"}, {"name": "positive", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 31497869, "num_examples": 6000}, {"name": "validation", "num_bytes": 6130773, "num_examples": 1183}], "download_size": 12202193, "dataset_size": 37628642}}
2023-10-15T10:35:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "qa_wikipedia_augmented_sentence_transformer_negative_farming_128" More Information needed
[ "# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming_128\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming_128\"\n\nMore Information needed" ]
[ 6, 31 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"qa_wikipedia_augmented_sentence_transformer_negative_farming_128\"\n\nMore Information needed" ]
54caca1376fbd34951d1ed38fc331fe6c31b58a3
Dataset preprocessed from https://huggingface.co/datasets/Yukang/LongAlpaca-12k. This contains 1000 samples that have a minimum length of 16k tokens.

## Script to reproduce

```python
from datasets import load_dataset
from transformers import AutoTokenizer
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Load the dataset and tokenizer
data = load_dataset("Yukang/LongAlpaca-12k")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)

def filter_function(batch):
    # Concatenate each instruction/output pair into a single string
    conversation_strs = [f'{instruction}\n\n{output}' for instruction, output in zip(batch['instruction'], batch['output'])]

    # Tokenize the strings without truncation
    tokens = tokenizer(conversation_strs, truncation=False, return_length=True)

    # Return True for examples whose token count exceeds 16k (16384 tokens);
    # data.filter() keeps only the rows for which this is True
    return [length > 16384 for length in tokens['length']]

filtered_data = data.filter(filter_function, batched=True, batch_size=1000)

# Convert to Pandas DataFrame
df = pd.DataFrame(filtered_data['train'])

# Sample 1k rows
sampled_df = df.sample(n=1000, random_state=1)

# Convert the Pandas DataFrame to a PyArrow Table
table = pa.table(sampled_df)

# Save the table as a Parquet file
pq.write_table(table, 'data.parquet')
```
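## Sanity check (illustrative)

A quick way to confirm the 16k-token floor is to re-tokenize the rows written to `data.parquet` by the script above. This is a minimal sketch rather than part of the original pipeline; it assumes the Parquet file sits in the working directory and reuses the same Mistral tokenizer.

```python
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)

# Read the Parquet file produced by the reproduction script
check = load_dataset("parquet", data_files="data.parquet", split="train")

# Recompute the token count of each instruction/output pair
lengths = [
    len(tokenizer(f"{row['instruction']}\n\n{row['output']}")["input_ids"])
    for row in check
]
print(len(lengths), min(lengths))  # expect 1000 rows, with the minimum above 16384
```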
casperhansen/longalpaca_1k_unlimited_test
[ "license:cc-by-nc-4.0", "region:us" ]
2023-10-15T10:40:15+00:00
{"license": "cc-by-nc-4.0"}
2023-10-15T10:49:53+00:00
[]
[]
TAGS #license-cc-by-nc-4.0 #region-us
Dataset preprocessed from URL This contains 1000 samples that have a minimum length of 16k tokens. ## Script to reproduce
[ "## Script to reproduce" ]
[ "TAGS\n#license-cc-by-nc-4.0 #region-us \n", "## Script to reproduce" ]
[ 17, 5 ]
[ "passage: TAGS\n#license-cc-by-nc-4.0 #region-us \n## Script to reproduce" ]
f98e9807f48bc3aeabb0b7405b6c566031f3b1a6
Dataset preprocessed from https://huggingface.co/datasets/Yukang/LongAlpaca-12k. This contains 1000 samples that have a minimum length of 16k tokens and a maximum of 32k tokens.

## Script to reproduce

```python
from datasets import load_dataset
from transformers import AutoTokenizer
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Load the dataset and tokenizer
data = load_dataset("Yukang/LongAlpaca-12k")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)

def filter_function(batch):
    # Concatenate each instruction/output pair into a single string
    conversation_strs = [f'{instruction}\n\n{output}' for instruction, output in zip(batch['instruction'], batch['output'])]

    # Tokenize the strings without truncation
    tokens = tokenizer(conversation_strs, truncation=False, return_length=True)

    # Return True for examples whose token count is above 16k (16384) and at most 32k (32768);
    # data.filter() keeps only the rows for which this is True
    return [length > 16384 and length <= 32768 for length in tokens['length']]

filtered_data = data.filter(filter_function, batched=True, batch_size=1000)

# Convert to Pandas DataFrame and keep only the relevant columns
df = pd.DataFrame(filtered_data['train'])
df = df.loc[:, ["input", "instruction", "output"]]

# Sample 1k rows
sampled_df = df.sample(n=1000, random_state=1)

# Convert the Pandas DataFrame to a PyArrow Table
table = pa.table(sampled_df)

# Save the table as a Parquet file
pq.write_table(table, 'data.parquet')
```
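## Loading the dataset (illustrative)

Once the Parquet file is uploaded to this repository, it can be pulled back down with the `datasets` library. The snippet below is a usage sketch, not part of the original script; the `train` split name assumes the Hub's default handling of a single Parquet file, and the expected columns come from the `df.loc` selection above.

```python
from datasets import load_dataset

# Repo id taken from this dataset card; "train" split name is an assumption
data = load_dataset("casperhansen/longalpaca_1k_test", split="train")

print(data.num_rows)      # expected: 1000
print(data.column_names)  # expected: ['input', 'instruction', 'output']
print(data[0]["instruction"][:200])
```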
casperhansen/longalpaca_1k_test
[ "license:cc-by-nc-4.0", "region:us" ]
2023-10-15T10:48:27+00:00
{"license": "cc-by-nc-4.0"}
2023-10-15T10:55:55+00:00
[]
[]
TAGS #license-cc-by-nc-4.0 #region-us
Dataset preprocessed from URL This contains 1000 samples that have a minimum length of 16k tokens and a maximum of 32k tokens. ## Script to reproduce
[ "## Script to reproduce" ]
[ "TAGS\n#license-cc-by-nc-4.0 #region-us \n", "## Script to reproduce" ]
[ 17, 5 ]
[ "passage: TAGS\n#license-cc-by-nc-4.0 #region-us \n## Script to reproduce" ]
9b0e18f454bc16362f98340e4d2543bc2a41beee
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.53169.svg)](https://doi.org/10.5281/zenodo.53169) # Collection of textures in colorectal cancer histology **Homepage**: https://zenodo.org/records/53169 \ **Publication Date**: 2016-05-26 \ **License**: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/legalcode) \ **Citation**: ```bibtex @dataset{kather_2016_53169, author = {Kather, Jakob Nikolas and Zöllner, Frank Gerrit and Bianconi, Francesco and Melchers, Susanne M and Schad, Lothar R and Gaiser, Timo and Marx, Alexander and Weis, Cleo-Aron}, title = {{Collection of textures in colorectal cancer histology}}, month = jun, year = 2016, publisher = {Zenodo} } ``` ## Description This data set represents a collection of textures in histological images of human colorectal cancer. It contains 5000 histological images of 150 * 150 px each (74 * 74 µm). Each image belongs to exactly one of eight tissue categories. ## Image format All images are RGB, 0.495 µm per pixel, digitized with an Aperio ScanScope (Aperio/Leica biosystems), magnification 20x. Histological samples are fully anonymized images of formalin-fixed paraffin-embedded human colorectal adenocarcinomas (primary tumors) from our pathology archive (Institute of Pathology, University Medical Center Mannheim, Heidelberg University, Mannheim, Germany). ## Ethics statement All experiments were approved by the institutional ethics board (medical ethics board II, University Medical Center Mannheim, Heidelberg University, Germany; approval 2015-868R-MA). The institutional ethics board waived the need for informed consent for this retrospective analysis of anonymized samples. All experiments were carried out in accordance with the approved guidelines and with the Declaration of Helsinki. ## More information / data usage For more information, please refer to the following article. Please cite this article when using the data set. \ [Kather JN, Weis CA, Bianconi F, Melchers SM, Schad LR, Gaiser T, Marx A, Zollner F: Multi-class texture analysis in colorectal cancer histology (2016), Scientific Reports (in press)](https://doi.org/10.1038/srep27988) ## Contact For questions, please contact: \ Dr. Jakob Nikolas Kather \ http://orcid.org/0000-0002-3730-5348 \ ResearcherID: D-4279-2015
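## Loading the dataset

The tiles and their tissue labels can be browsed with the Hugging Face `datasets` library. The snippet below is an illustrative sketch: the repository id, split name, and label handling are taken from this dataset's configuration rather than from the original publication.

```python
from datasets import load_dataset

# Load the single training split (5000 tiles)
ds = load_dataset("1aurent/Kather-texture-2016", split="train")

example = ds[0]
image = example["image"]                                      # 150 x 150 px RGB tile (PIL image)
label_name = ds.features["label"].int2str(example["label"])   # e.g. "TUMOR", "STROMA", ...
print(image.size, label_name)
```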
1aurent/Kather-texture-2016
[ "task_categories:image-classification", "size_categories:1K<n<10K", "license:cc-by-4.0", "biology", "Colorectal Pancer", "Histopathology", "Histology", "Digital Pathology", "region:us" ]
2023-10-15T10:53:57+00:00
{"license": "cc-by-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "tags": ["biology", "Colorectal Pancer", "Histopathology", "Histology", "Digital Pathology"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "ADIPOSE", "1": "COMPLEX", "2": "DEBRIS", "3": "EMPTY", "4": "LYMPHO", "5": "MUCOSA", "6": "STROMA", "7": "TUMOR"}}}}], "splits": [{"name": "train", "num_bytes": 329215083, "num_examples": 5000}], "download_size": 293441024, "dataset_size": 329215083}}
2023-10-15T11:10:50+00:00
[]
[]
TAGS #task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-4.0 #biology #Colorectal Pancer #Histopathology #Histology #Digital Pathology #region-us
![DOI](URL # Collection of textures in colorectal cancer histology Homepage: URL \ Publication Date: 2016-05-26 \ License: Creative Commons Attribution 4.0 International \ Citation: ## Description This data set represents a collection of textures in histological images of human colorectal cancer. It contains 5000 histological images of 150 * 150 px each (74 * 74 µm). Each image belongs to exactly one of eight tissue categories. ## Image format All images are RGB, 0.495 µm per pixel, digitized with an Aperio ScanScope (Aperio/Leica biosystems), magnification 20x. Histological samples are fully anonymized images of formalin-fixed paraffin-embedded human colorectal adenocarcinomas (primary tumors) from our pathology archive (Institute of Pathology, University Medical Center Mannheim, Heidelberg University, Mannheim, Germany). ## Ethics statement All experiments were approved by the institutional ethics board (medical ethics board II, University Medical Center Mannheim, Heidelberg University, Germany; approval 2015-868R-MA). The institutional ethics board waived the need for informed consent for this retrospective analysis of anonymized samples. All experiments were carried out in accordance with the approved guidelines and with the Declaration of Helsinki. ## More information / data usage For more information, please refer to the following article. Please cite this article when using the data set. \ Kather JN, Weis CA, Bianconi F, Melchers SM, Schad LR, Gaiser T, Marx A, Zollner F: Multi-class texture analysis in colorectal cancer histology (2016), Scientific Reports (in press) ## Contact For questions, please contact: \ Dr. Jakob Nikolas Kather \ URL \ ResearcherID: D-4279-2015
[ "# Collection of textures in colorectal cancer histology\n\nHomepage: URL \\\nPublication Date: 2016-05-26 \\\nLicense: Creative Commons Attribution 4.0 International \\\nCitation:", "## Description\n\nThis data set represents a collection of textures in histological images of human colorectal cancer.\nIt contains 5000 histological images of 150 * 150 px each (74 * 74 µm). Each image belongs to exactly one of eight tissue categories.", "## Image format\n\nAll images are RGB, 0.495 µm per pixel, digitized with an Aperio ScanScope (Aperio/Leica biosystems), magnification 20x.\nHistological samples are fully anonymized images of formalin-fixed paraffin-embedded human colorectal adenocarcinomas (primary tumors) from our pathology archive\n(Institute of Pathology, University Medical Center Mannheim, Heidelberg University, Mannheim, Germany).", "## Ethics statement\n\nAll experiments were approved by the institutional ethics board (medical ethics board II, University Medical Center Mannheim, Heidelberg University, Germany; approval 2015-868R-MA).\nThe institutional ethics board waived the need for informed consent for this retrospective analysis of anonymized samples.\nAll experiments were carried out in accordance with the approved guidelines and with the Declaration of Helsinki.", "## More information / data usage\n\nFor more information, please refer to the following article. Please cite this article when using the data set. \\\nKather JN, Weis CA, Bianconi F, Melchers SM, Schad LR, Gaiser T, Marx A, Zollner F:\nMulti-class texture analysis in colorectal cancer histology (2016), Scientific Reports (in press)", "## Contact\n\nFor questions, please contact: \\\nDr. Jakob Nikolas Kather \\\nURL \\\nResearcherID: D-4279-2015" ]
[ "TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-4.0 #biology #Colorectal Pancer #Histopathology #Histology #Digital Pathology #region-us \n", "# Collection of textures in colorectal cancer histology\n\nHomepage: URL \\\nPublication Date: 2016-05-26 \\\nLicense: Creative Commons Attribution 4.0 International \\\nCitation:", "## Description\n\nThis data set represents a collection of textures in histological images of human colorectal cancer.\nIt contains 5000 histological images of 150 * 150 px each (74 * 74 µm). Each image belongs to exactly one of eight tissue categories.", "## Image format\n\nAll images are RGB, 0.495 µm per pixel, digitized with an Aperio ScanScope (Aperio/Leica biosystems), magnification 20x.\nHistological samples are fully anonymized images of formalin-fixed paraffin-embedded human colorectal adenocarcinomas (primary tumors) from our pathology archive\n(Institute of Pathology, University Medical Center Mannheim, Heidelberg University, Mannheim, Germany).", "## Ethics statement\n\nAll experiments were approved by the institutional ethics board (medical ethics board II, University Medical Center Mannheim, Heidelberg University, Germany; approval 2015-868R-MA).\nThe institutional ethics board waived the need for informed consent for this retrospective analysis of anonymized samples.\nAll experiments were carried out in accordance with the approved guidelines and with the Declaration of Helsinki.", "## More information / data usage\n\nFor more information, please refer to the following article. Please cite this article when using the data set. \\\nKather JN, Weis CA, Bianconi F, Melchers SM, Schad LR, Gaiser T, Marx A, Zollner F:\nMulti-class texture analysis in colorectal cancer histology (2016), Scientific Reports (in press)", "## Contact\n\nFor questions, please contact: \\\nDr. Jakob Nikolas Kather \\\nURL \\\nResearcherID: D-4279-2015" ]
[ 61, 39, 61, 105, 94, 85, 30 ]
[ "passage: TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-4.0 #biology #Colorectal Pancer #Histopathology #Histology #Digital Pathology #region-us \n# Collection of textures in colorectal cancer histology\n\nHomepage: URL \\\nPublication Date: 2016-05-26 \\\nLicense: Creative Commons Attribution 4.0 International \\\nCitation:## Description\n\nThis data set represents a collection of textures in histological images of human colorectal cancer.\nIt contains 5000 histological images of 150 * 150 px each (74 * 74 µm). Each image belongs to exactly one of eight tissue categories.## Image format\n\nAll images are RGB, 0.495 µm per pixel, digitized with an Aperio ScanScope (Aperio/Leica biosystems), magnification 20x.\nHistological samples are fully anonymized images of formalin-fixed paraffin-embedded human colorectal adenocarcinomas (primary tumors) from our pathology archive\n(Institute of Pathology, University Medical Center Mannheim, Heidelberg University, Mannheim, Germany).## Ethics statement\n\nAll experiments were approved by the institutional ethics board (medical ethics board II, University Medical Center Mannheim, Heidelberg University, Germany; approval 2015-868R-MA).\nThe institutional ethics board waived the need for informed consent for this retrospective analysis of anonymized samples.\nAll experiments were carried out in accordance with the approved guidelines and with the Declaration of Helsinki.## More information / data usage\n\nFor more information, please refer to the following article. Please cite this article when using the data set. \\\nKather JN, Weis CA, Bianconi F, Melchers SM, Schad LR, Gaiser T, Marx A, Zollner F:\nMulti-class texture analysis in colorectal cancer histology (2016), Scientific Reports (in press)## Contact\n\nFor questions, please contact: \\\nDr. Jakob Nikolas Kather \\\nURL \\\nResearcherID: D-4279-2015" ]
bb6231b50e7946d00a84198a0a17155e85ea208c
# Dataset Card for "medical_healthwa_2.0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atom92/medical_healthwa_2.0
[ "region:us" ]
2023-10-15T11:41:19+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1544703, "num_examples": 7360}], "download_size": 501374, "dataset_size": 1544703}}
2023-10-15T11:41:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medical_healthwa_2.0" More Information needed
[ "# Dataset Card for \"medical_healthwa_2.0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medical_healthwa_2.0\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medical_healthwa_2.0\"\n\nMore Information needed" ]
e1764dc44dc7ee39181b8ad78c5bff3f158151cd
# Tweet Annotation Sensitivity Experiment 1: Annotation in Six Experimental Conditions ***<font color= red>Attention: This repository contains cases that might be offensive or upsetting. We do not support the views expressed in these hateful posts.</font>*** ## Description We drew a stratified sample of 20 tweets, that were pre-annotated in a study by [Davidson et al. (2017)](https://ojs.aaai.org/index.php/ICWSM/article/view/14955) for Hate Speech / Offensive Language / Neither. The stratification was done with respect to majority-voted class and level of disagreement. We then recruited 1000 [Prolific](https://www.prolific.com/) workers to annotate each of the 20 tweets. Annotators were randomly selected into one of six experimental conditions, as shown in the following figures. In these conditions, they were asked to assign the labels Hate Speech / Offensive Language / Neither. In addition, we collected a variety of demographic variables (e.g. age and gender) and some para data (e.g. duration of the whole task, duration per screen). <img src="https://raw.githubusercontent.com/boleima/tweet-annotation-supplements/main/figs/image.png" width = "500" height = "250" alt="" align=center /> ## Examples of Annotation Conditions Here we show some examples of the annotation conditions: Condition A <img src="https://raw.githubusercontent.com/boleima/tweet-annotation-supplements/main/figs/condition_a.png" width = "500" height = "250" alt="" align=center /> Condition C <img src="https://raw.githubusercontent.com/boleima/tweet-annotation-supplements/main/figs/condition_c.png" width = "500" height = "250" alt="" align=center /> Condition E <img src="https://raw.githubusercontent.com/boleima/tweet-annotation-supplements/main/figs/condition_e.png" width = "500" height = "250" alt="" align=center /> ## Tweets We use the following 20 tweets from [Davidson et al. (2017)](https://ojs.aaai.org/index.php/ICWSM/article/view/14955). The usernames were hashed. | Tweet ID | Tweets Hashed | | -------------- | ------------------ | |1|Jihadi John has struck again. Prayers with the victims' families. What is the #bounty on His head? Time 4 him 2 meet allah| |2|RT @#####: Seriously, Florida. A vote for Charlie Christ is a vote for taxes and ebola.| |3|Islamic Jihadis run away to Pakistan @##### @##### #UniteBlue| |4|At this rate, I'd cheer for the awful New York Yankees over the St. Louis Cardinals.| |5|Allergies have my eyes all red and chink eyed, I look like I'm stoned| |6|Not protected in the state of Utah. There is no fear of queer couples forcing churches to marry them. They do not legally have to do so.| |7|RT @#####: Boss Niggas Fuk wit Boss bitches Vice versa..| |8|RT @#####: ever wanna text somebody and be like bitch die I hope you don't wake up you low down dirty muthafxcka&#128520| |9|@##### @##### @##### she's a right twat, I'd tell you, honestly Sky! Honestly &#128520| |10|So yall give farwells to Lulu yet did all but shit on Kris? Ok bitches... ok| |11|RT @#####: Dis bitch wanna smoke all our weed| |12|Shittin on all my old hoes| |13|Getting hit off a cunt thats about 6 foot sideways and a cunt that was in the army lol, i dini care, yous are fuckin laughable| |14|When it rains it's good "go to the mall weather" all the coons be at home| |15|Don't mind black people.....hate niggers!| |16|My eyes feel so chink eyed| |17|"From now on, I will call all radical MUSLIMS niggers!It is very fitting and it is racist! 
I AM RACIST AGAINST ANYONE WHO HATES AMERICA!"""| |18|RT @#####: Fucking chinks in Clash of Clans always fuck me up. Bitch ass kamikaze faggots.| |19|@##### ya I know all the slang I'm racist I h8 porch monkies| |20|RT @#####: Stupid fucking nigger LeBron. You flopping stupid jungle bunny monkey faggot.| ## Codebook | Column Name | Description | Type | | -------------- | ------------------ |---------------- | |id|annotator ID|integer| |age|Age|integer| |gender|Gender<br> 1: Female<br>2: Male<br> 3: Something Else<br> 4: Prefer not to say<br> |factor| |afam|African-American<br> 0: No<br> 1: Yes|binary| |asian|Asian-American<br> 0: No<br> 1: Yes|binary| |hispanic|Hispanic<br> 0: No<br> 1: Yes|binary| |white|White<br> 0: No<br> 1: Yes|binary| |race_other|Other race/ethnicity<br> 0: No<br> 1: Yes|binary| |race_not_say|Prefer not to say race/ethnicity<br> 0: No<br> 1: Yes|binary| |education|Highest educational attainment<br> 1: Less than high school<br>2: High school<br> 3: Some college<br> 4: College graduate<br> 5: Master's degree or professional degree (Law, Medicine, MPH, etc.) <br> 6: Doctoral degree (PhD, DPH, EdD, etc.)|factor| |sexuality|Sexuality<br> 1: Gay or Lesbian<br>2: Bisexual<br> 3: Straight<br> 4: Something Else<br> |factor| |english|English first language? <br> 0: No<br> 1: Yes|binary| |tw_use|Twitter Use <br> 1: Most days<br>2: Most weeks, but not every day<br> 3: A few times a month<br> 4: A few times a year<br> 5: Less often <br> 6: Never|factor| |social_media_use|Social Media Use<br> 1: Most days<br>2: Most weeks, but not every day<br> 3: A few times a month<br> 4: A few times a year<br> 5: Less often <br> 0: Never|factor| |prolific_hours|Prolific hours worked last month|integer| |task_fun|Coding work was: fun<br> 0: No<br> 1: Yes|binary| |task_interesting|Coding work was: interesting<br> 0: No<br> 1: Yes|binary| |task_boring|Coding work was: boring<br> 0: No<br> 1: Yes|binary| |task_repetitive|Coding work was: repetitive<br> 0: No<br> 1: Yes|binary| |task_important|Coding work was: important<br> 0: No<br> 1: Yes|binary| |task_depressing|Coding work was: depressing<br> 0: No<br> 1: Yes|binary| |task_offensive|Coding work was: offensive<br> 0: No<br> 1: Yes|binary| |another_tweettask|Likelihood to do another Tweet related task<br> not at all: Not at all likely<br> somewhat: Somewhat likely<br> very: Very likely|factor| |another_hatetask|Likelihood to do another Hate Speech related task<br> not at all: Not at all likely<br> somewhat: Somewhat likely<br> very: Very likely|factor| |page_history|Order in which annotator saw pages|character| |date_of_first_access|Datetime of first access|datetime| |date_of_last_access|Datetime of last access|datetime| |duration_sec|Task duration in seconds|integer| |version|Version of annotation task <br> A: Version A<br>B: Version B<br> C: Version C<br> D: Version D<br> E: Version E<br> F: Version F|factor| |tw1-20|Label assigned to Tweet 1-20<br> hate speech: Hate Speech<br> offensive language: Offensive Language<br> neither: Neither HS nor OL <br> NA: Missing or "don't know"|factor| |tw_duration_1-20|Annotation duration in milliseconds Tweet 1-20|numerical| |num_approvals|Prolific data: number of previous task approvals of annotator|integer| |num_rejections|Prolific data: number of previous task rejections of annotator|integer| |prolific_score|Annotator quality score by Prolific|numerical| |countryofbirth|Prolific data: Annotator country of birth|character| |currentcountryofresidence|Prolific data: Annotator country of residence|character| 
|employmentstatus|Prolific data: Annotator Employment Status<br> Full-time<br> Part-time<br> Unemployed (and job-seeking)<br> Due to start a new job within the next month<br> Not in paid work (e.g. homemaker, retired or disabled)<br> Other<br> DATA EXPIRED|factor| |firstlanguage|Prolific data: Annotator first language|character| |nationality|Prolific data: Nationality|character| |studentstatus|Prolific data: Student status<br> Yes<br> No <br> DATA EXPIRED|factor| ## Citation If you found the dataset useful, please cite: ``` @InProceedings{beck2022, author="Beck, Jacob and Eckman, Stephanie and Chew, Rob and Kreuter, Frauke", editor="Chen, Jessie Y. C. and Fragomeni, Gino and Degen, Helmut and Ntoa, Stavroula", title="Improving Labeling Through Social Science Insights: Results and Research Agenda", booktitle="HCI International 2022 -- Late Breaking Papers: Interacting with eXtended Reality and Artificial Intelligence", year="2022", publisher="Springer Nature Switzerland", address="Cham", pages="245--261", isbn="978-3-031-21707-4" } ```
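The codebook above stores the annotations in wide format: each annotator row carries twenty label columns (`tw1`–`tw20`) and twenty duration columns (`tw_duration_1`–`tw_duration_20`). For many analyses a long format with one row per (annotator, tweet) pair is more convenient. The sketch below is illustrative only: it assumes the table has been exported to a flat file (the name `annotations.csv` is a placeholder) with exactly the column names listed in the codebook.

```python
# Hedged reshaping sketch; relies only on column names from the codebook.
import pandas as pd

df = pd.read_csv("annotations.csv")  # one row per annotator

# Labels: tw1..tw20 -> one row per (annotator, tweet).
labels = df.melt(
    id_vars=["id", "version"],
    value_vars=[f"tw{i}" for i in range(1, 21)],
    var_name="column", value_name="label",
)
labels["tweet_id"] = labels["column"].str.extract(r"(\d+)$", expand=False).astype(int)

# Annotation durations in milliseconds: tw_duration_1..tw_duration_20.
durations = df.melt(
    id_vars=["id"],
    value_vars=[f"tw_duration_{i}" for i in range(1, 21)],
    var_name="column", value_name="duration_ms",
)
durations["tweet_id"] = durations["column"].str.extract(r"(\d+)$", expand=False).astype(int)

long = labels.drop(columns="column").merge(
    durations[["id", "tweet_id", "duration_ms"]], on=["id", "tweet_id"]
)

# Label distribution by experimental condition (versions A-F).
print(long.groupby(["version", "label"]).size())
```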
soda-lmu/tweet-annotation-sensitivity-1
[ "task_categories:text-classification", "task_ids:sentiment-classification", "task_ids:hate-speech-detection", "size_categories:1K<n<10K", "language:en", "region:us" ]
2023-10-15T11:48:26+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification", "hate-speech-detection"]}
2023-10-20T21:40:05+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #task_ids-hate-speech-detection #size_categories-1K<n<10K #language-English #region-us
Tweet Annotation Sensitivity Experiment 1: Annotation in Six Experimental Conditions ==================================================================================== *Attention: This repository contains cases that might be offensive or upsetting. We do not support the views expressed in these hateful posts.* Description ----------- We drew a stratified sample of 20 tweets, that were pre-annotated in a study by Davidson et al. (2017) for Hate Speech / Offensive Language / Neither. The stratification was done with respect to majority-voted class and level of disagreement. We then recruited 1000 Prolific workers to annotate each of the 20 tweets. Annotators were randomly selected into one of six experimental conditions, as shown in the following figures. In these conditions, they were asked to assign the labels Hate Speech / Offensive Language / Neither. In addition, we collected a variety of demographic variables (e.g. age and gender) and some para data (e.g. duration of the whole task, duration per screen). <img src="URL width = "500" height = "250" alt="" align=center /> Examples of Annotation Conditions --------------------------------- Here we show some examples of the annotation conditions: Condition A <img src="URL width = "500" height = "250" alt="" align=center /> Condition C <img src="URL width = "500" height = "250" alt="" align=center /> Condition E <img src="URL width = "500" height = "250" alt="" align=center /> Tweets ------ We use the following 20 tweets from Davidson et al. (2017). The usernames were hashed. Codebook -------- Column Name: id, Description: annotator ID, Type: integer Column Name: age, Description: Age, Type: integer Column Name: gender, Description: Gender 1: Female 2: Male 3: Something Else 4: Prefer not to say , Type: factor Column Name: afam, Description: African-American 0: No 1: Yes, Type: binary Column Name: asian, Description: Asian-American 0: No 1: Yes, Type: binary Column Name: hispanic, Description: Hispanic 0: No 1: Yes, Type: binary Column Name: white, Description: White 0: No 1: Yes, Type: binary Column Name: race\_other, Description: Other race/ethnicity 0: No 1: Yes, Type: binary Column Name: race\_not\_say, Description: Prefer not to say race/ethnicity 0: No 1: Yes, Type: binary Column Name: education, Description: Highest educational attainment 1: Less than high school 2: High school 3: Some college 4: College graduate 5: Master's degree or professional degree (Law, Medicine, MPH, etc.) 6: Doctoral degree (PhD, DPH, EdD, etc.), Type: factor Column Name: sexuality, Description: Sexuality 1: Gay or Lesbian 2: Bisexual 3: Straight 4: Something Else , Type: factor Column Name: english, Description: English first language? 
0: No 1: Yes, Type: binary Column Name: tw\_use, Description: Twitter Use 1: Most days 2: Most weeks, but not every day 3: A few times a month 4: A few times a year 5: Less often 6: Never, Type: factor Column Name: social\_media\_use, Description: Social Media Use 1: Most days 2: Most weeks, but not every day 3: A few times a month 4: A few times a year 5: Less often 0: Never, Type: factor Column Name: prolific\_hours, Description: Prolific hours worked last month, Type: integer Column Name: task\_fun, Description: Coding work was: fun 0: No 1: Yes, Type: binary Column Name: task\_interesting, Description: Coding work was: interesting 0: No 1: Yes, Type: binary Column Name: task\_boring, Description: Coding work was: boring 0: No 1: Yes, Type: binary Column Name: task\_repetitive, Description: Coding work was: repetitive 0: No 1: Yes, Type: binary Column Name: task\_important, Description: Coding work was: important 0: No 1: Yes, Type: binary Column Name: task\_depressing, Description: Coding work was: depressing 0: No 1: Yes, Type: binary Column Name: task\_offensive, Description: Coding work was: offensive 0: No 1: Yes, Type: binary Column Name: another\_tweettask, Description: Likelihood to do another Tweet related task not at all: Not at all likely somewhat: Somewhat likely very: Very likely, Type: factor Column Name: another\_hatetask, Description: Likelihood to do another Hate Speech related task not at all: Not at all likely somewhat: Somewhat likely very: Very likely, Type: factor Column Name: page\_history, Description: Order in which annotator saw pages, Type: character Column Name: date\_of\_first\_access, Description: Datetime of first access, Type: datetime Column Name: date\_of\_last\_access, Description: Datetime of last access, Type: datetime Column Name: duration\_sec, Description: Task duration in seconds, Type: integer Column Name: version, Description: Version of annotation task A: Version A B: Version B C: Version C D: Version D E: Version E F: Version F, Type: factor Column Name: tw1-20, Description: Label assigned to Tweet 1-20 hate speech: Hate Speech offensive language: Offensive Language neither: Neither HS nor OL NA: Missing or "don't know", Type: factor Column Name: tw\_duration\_1-20, Description: Annotation duration in milliseconds Tweet 1-20, Type: numerical Column Name: num\_approvals, Description: Prolific data: number of previous task approvals of annotator, Type: integer Column Name: num\_rejections, Description: Prolific data: number of previous task rejections of annotator, Type: integer Column Name: prolific\_score, Description: Annotator quality score by Prolific, Type: numerical Column Name: countryofbirth, Description: Prolific data: Annotator country of birth, Type: character Column Name: currentcountryofresidence, Description: Prolific data: Annotator country of residence, Type: character Column Name: employmentstatus, Description: Prolific data: Annotator Employment Status Full-timePart-time Unemployed (and job-seeking) Due to start a new job within the next month Not in paid work (e.g. homemaker, retired or disabled) Other DATA EXPIRED, Type: factor Column Name: firstlanguage, Description: Prolific data: Annotator first language, Type: character Column Name: nationality, Description: Prolific data: Nationality, Type: character Column Name: studentstatus, Description: Prolific data: Student status Yes No DATA EXPIRED, Type: factor If you found the dataset useful, please cite:
[]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #task_ids-hate-speech-detection #size_categories-1K<n<10K #language-English #region-us \n" ]
[ 58 ]
[ "passage: TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #task_ids-hate-speech-detection #size_categories-1K<n<10K #language-English #region-us \n" ]
a8a9b7757022158b6bd0c18e095338bf29b915ab
# Dataset Card for "medical_healthwa_3.0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atom92/medical_healthwa_3.0
[ "region:us" ]
2023-10-15T11:53:13+00:00
{"dataset_info": {"features": [{"name": "text", "struct": [{"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 2710809, "num_examples": 7360}], "download_size": 586464, "dataset_size": 2710809}}
2023-10-15T11:53:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medical_healthwa_3.0" More Information needed
[ "# Dataset Card for \"medical_healthwa_3.0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medical_healthwa_3.0\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medical_healthwa_3.0\"\n\nMore Information needed" ]
8db9a812181b0fad765850f4e62ca14dce421d64
# Dataset Card for "Translate_all_mixed_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DigirentEnterprise/Translate_all_mixed_dataset
[ "region:us" ]
2023-10-15T12:05:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "ouput", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1543490120, "num_examples": 3370045}], "download_size": 950032312, "dataset_size": 1543490120}}
2023-10-15T12:16:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Translate_all_mixed_dataset" More Information needed
[ "# Dataset Card for \"Translate_all_mixed_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Translate_all_mixed_dataset\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Translate_all_mixed_dataset\"\n\nMore Information needed" ]
64ef3ed0219c0f985e6c222f7b37a1c86eb80fab
# SBS Cantonese Speech Corpus This speech corpus contains **435 hours** of [SBS Cantonese](https://www.sbs.com.au/language/chinese/zh-hant/podcast/sbs-cantonese) podcasts from August 2022 to October 2023. There are **2,519 episodes** and each episode is split into segments that are at most 10 seconds long. In total, there are **189,216 segments** in this corpus. Here is a breakdown of the categories of episodes present in this dataset: <style> table th:first-of-type { width: 5%; } table th:nth-of-type(2) { width: 15%; } table th:nth-of-type(3) { width: 50%; } </style> | Category | SBS Channels | Episodes | |-------------------|----------------------|-------| | news | 中文新聞, 新聞簡報 | 622 | | business | 寰宇金融 | 148 | | vaccine | 疫苗快報 | 71 | | gardening | 園藝趣談 | 58 | | tech | 科技世界 | 56 | | health | 健康快樂人 | 53 | | culture | 文化360 | 49 | | english | 學英語 | 41 | | expert | 專家話你知 | 37 | | interview | 我不是名人 | 20 | | career | 澳洲招職 | 18 | | food | 美食速遞 | 18 | | uncategorized | n/a | 1328 | * Uncategorized episodes are mostly news but also contain other categories listed above. ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** Kevin Li - **Language(s):** Cantonese, English (only in podcasts categorized as "english") - **License:** Creative Commons Attribution Non-Commercial 4.0 ### Scraper <!-- Provide the basic links for the dataset. --> - **Repository:** https://github.com/AlienKevin/sbs_cantonese ## Uses Each episode is split into segments using [silero-vad](https://github.com/snakers4/silero-vad). Since silero-vad is not trained on Cantonese data, the segmentation is not ideal and often breaks sentences in the middle. Hence, this dataset is not intended to be used for supervised ASR. Instead, it is intended to be used for self-supervised speech pretraining, like training WavLM, HuBERT, and Wav2Vec. ### Format Each segment is stored as a monochannel FLAC file with a sample rate of 16k Hz. You can find the segments under the `audio/` folder, where groups of segments are bundled into a .tar.gz file for ease of distribution. The filename of each segment shows which episode it belongs to and its place within that episode: For example, here's a filename: ``` 0061gy0w8_0000_5664_81376 ``` where * `0061gy0w8` is the episode id * `0000` means that it is the first segment of that episode * `5664` is the starting sample of this segment. Remember all episodes are sampled at 16k Hz, so the total number of samples in an episode is (the duration in seconds * 16,000). * `81376` is the ending (exclusive) sample of this segment. ### Metadata Metadata for each episode is stored in the `metadata.jsonl` file, where each line stores the metadata for one episode: Here's the metadata for one of the episodes (split into multiple lines for clarity): ```json { "title": "SBS 中文新聞 (7月5日)", "date": "05/07/2023", "view_more_link": "https://www.sbs.com.au/language/chinese/zh-hant/podcast-episode/chinese-news-5-7-2023/tl6s68rdk", "download_link": "https://sbs-podcast.streamguys1.com/sbs-cantonese/20230705105920-cantonese-0288b7c2-cb6d-4e0e-aec2-2680dd8738e0.mp3?awCollectionId=sbs-cantonese&awGenre=News&awEpisodeId=20230705105920-cantonese-0288b7c2-cb6d-4e0e-aec2-2680dd8738e0" } ``` where * `title` is the title of the episode * `date` is the date when the episode is published * `view_more_link` is a link to the associated article/description for this episode.
Many news episodes have extremely detailed manuscripts written in Traditional Chinese while others have briefer summaries or key points available. * `download_link` is the link to download the audio for this episode. It is usually hosted on [streamguys](https://www.streamguys.com/) but some earlier episodes are stored on SBS's own server at https://images.sbs.com.au. The id of each episode appears at the end of its `view_more_link`. It appears to be a precomputed hash that is unique to each episode. ```python id = view_more_link.split("/")[-1] ```
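The filename pattern and the `metadata.jsonl` fields documented above are straightforward to consume programmatically. The sketch below is an unofficial illustration under the card's stated conventions (16 kHz audio, `<episode_id>_<segment_index>_<start_sample>_<end_sample>` filenames, a local `metadata.jsonl`); the file paths are placeholders rather than files guaranteed to exist at those locations.

```python
# Hedged sketch: parse the documented segment filename pattern and read
# per-episode metadata. Only conventions stated in the card are assumed.
import json

SAMPLE_RATE = 16_000  # every segment is sampled at 16 kHz, per the card


def parse_segment_name(stem: str) -> dict:
    """Split '<episode_id>_<segment_index>_<start_sample>_<end_sample>'."""
    episode_id, index, start, end = stem.rsplit("_", 3)
    start, end = int(start), int(end)
    return {
        "episode_id": episode_id,
        "segment_index": int(index),
        "start_sample": start,
        "end_sample": end,  # exclusive
        "duration_sec": (end - start) / SAMPLE_RATE,
    }


print(parse_segment_name("0061gy0w8_0000_5664_81376"))
# duration_sec = (81376 - 5664) / 16000 ≈ 4.73 seconds

# One JSON object per line; the episode id is the last path component of
# view_more_link, as noted above.
episodes = {}
with open("metadata.jsonl", encoding="utf-8") as f:
    for line in f:
        meta = json.loads(line)
        episodes[meta["view_more_link"].split("/")[-1]] = meta
```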
AlienKevin/sbs_cantonese
[ "size_categories:100K<n<1M", "language:yue", "license:cc-by-nc-4.0", "region:us" ]
2023-10-15T12:17:51+00:00
{"language": ["yue"], "license": "cc-by-nc-4.0", "size_categories": ["100K<n<1M"], "pretty_name": "SBS Cantonese Speech Corpus"}
2023-10-15T20:57:53+00:00
[]
[ "yue" ]
TAGS #size_categories-100K<n<1M #language-Yue Chinese #license-cc-by-nc-4.0 #region-us
SBS Cantonese Speech Corpus =========================== This speech corpus contains 435 hours of SBS Cantonese podcasts from Auguest 2022 to October 2023. There are 2,519 episodes and each episode is split into segments that are at most 10 seconds long. In total, there are 189,216 segments in this corpus. Here is a breakdown on the categories of episodes present in this dataset: table th:first-of-type { width: 5%; } table th:nth-of-type(2) { width: 15%; } table th:nth-of-type(3) { width: 50%; } Category: news, SBS Channels: 中文新聞, 新聞簡報, Episodes: 622 Category: business, SBS Channels: 寰宇金融, Episodes: 148 Category: vaccine, SBS Channels: 疫苗快報, Episodes: 71 Category: gardening, SBS Channels: 園藝趣談, Episodes: 58 Category: tech, SBS Channels: 科技世界, Episodes: 56 Category: health, SBS Channels: 健康快樂人, Episodes: 53 Category: culture, SBS Channels: 文化360, Episodes: 49 Category: english, SBS Channels: 學英語, Episodes: 41 Category: expert, SBS Channels: 專家話你知, Episodes: 37 Category: interview, SBS Channels: 我不是名人, Episodes: 20 Category: career, SBS Channels: 澳洲招職, Episodes: 18 Category: food, SBS Channels: 美食速遞, Episodes: 18 Category: uncategorized, SBS Channels: n/a, Episodes: 1328 * Uncategorized episodes are mostly news but also contains other categories listed above. Dataset Details --------------- ### Dataset Description * Curated by: Kevin Li * Language(s): Cantonese, English (only in podcasts categorized as "english") * License: Creative Commons Attribution Non-Commercial 4.0 ### Scraper * Repository: URL Uses ---- Each episode is split into segments using silero-vad. Since silero-vad is not trained on Cantonese data, the segmentation is not ideal and often break sentences in the middle. Hence, this dataset is not intended to be used for supervised ASR. Instead, it is intended to be used for self-supervised speech pretraining, like training WavLM, HuBERT, and Wav2Vec. ### Format Each segment is stored as a monochannel FLAC file with a sample rate of 16k Hz. You can find the segments under the 'audio/' folder, where groups of segments are bundled into a .URL file for ease of distribution. The filename of the segment shows which episodes it belongs to and place of it within that episode: For example, here's a filename: where * '0061gy0w8' is the episode id * '0000' means that it is the first segment of that episode * '5664' is the starting sample of this segment. Remember all episodes are sampled at 16k Hz, so the total number of samples in an episode is (the duration in seconds \* 16,000). * '81376' is the ending (exclusive) sample of this segment. ### Metadata Metadata for each episode is stored in the 'URL' file, where each line stores the metadata for one episode: Here's the metadata for one of the episodes (split into multiple lines for clarity): where * 'title' is the title of the episode * 'date' is the date when the episode is published * 'view\_more\_link' is a link to the associated article/description for this episode. Many news episodes have extremely detailed manuscripts written in Traditional Chinese while others have briefer summaries or key points available. * 'download\_link' is the link to download the audio for this episode. It is usually hosted on streamguys but some earlier episodes are stored SBS's own server at URL. The id of each episode appears at the end of its 'view\_more\_link'. It appears to be a precomputed hash that is unique to each episode.
[ "### Dataset Description\n\n\n* Curated by: Kevin Li\n* Language(s): Cantonese, English (only in podcasts categorized as \"english\")\n* License: Creative Commons Attribution Non-Commercial 4.0", "### Scraper\n\n\n* Repository: URL\n\n\nUses\n----\n\n\nEach episode is split into segments using silero-vad.\nSince silero-vad is not trained on Cantonese data, the segmentation is not ideal and often break sentences in the middle.\nHence, this dataset is not intended to be used for supervised ASR. Instead, it is intended to be used for self-supervised\nspeech pretraining, like training WavLM, HuBERT, and Wav2Vec.", "### Format\n\n\nEach segment is stored as a monochannel FLAC file with a sample rate of 16k Hz. You can find the segments under the 'audio/' folder,\nwhere groups of segments are bundled into a .URL file for ease of distribution.\n\n\nThe filename of the segment shows which episodes it belongs to and place of it within that episode:\nFor example, here's a filename:\n\n\nwhere\n\n\n* '0061gy0w8' is the episode id\n* '0000' means that it is the first segment of that episode\n* '5664' is the starting sample of this segment. Remember all episodes are sampled at 16k Hz, so the total number of samples\nin an episode is (the duration in seconds \\* 16,000).\n* '81376' is the ending (exclusive) sample of this segment.", "### Metadata\n\n\nMetadata for each episode is stored in the 'URL' file, where each line stores the metadata for one episode:\nHere's the metadata for one of the episodes (split into multiple lines for clarity):\n\n\nwhere\n\n\n* 'title' is the title of the episode\n* 'date' is the date when the episode is published\n* 'view\\_more\\_link' is a link to the associated article/description for this episode.\nMany news episodes have extremely detailed manuscripts written in Traditional Chinese while others have briefer summaries or key points available.\n* 'download\\_link' is the link to download the audio for this episode. It is usually hosted on streamguys but some earlier episodes\nare stored SBS's own server at URL.\n\n\nThe id of each episode appears at the end of its 'view\\_more\\_link'. It appears to be a precomputed hash that is unique to each episode." ]
[ "TAGS\n#size_categories-100K<n<1M #language-Yue Chinese #license-cc-by-nc-4.0 #region-us \n", "### Dataset Description\n\n\n* Curated by: Kevin Li\n* Language(s): Cantonese, English (only in podcasts categorized as \"english\")\n* License: Creative Commons Attribution Non-Commercial 4.0", "### Scraper\n\n\n* Repository: URL\n\n\nUses\n----\n\n\nEach episode is split into segments using silero-vad.\nSince silero-vad is not trained on Cantonese data, the segmentation is not ideal and often break sentences in the middle.\nHence, this dataset is not intended to be used for supervised ASR. Instead, it is intended to be used for self-supervised\nspeech pretraining, like training WavLM, HuBERT, and Wav2Vec.", "### Format\n\n\nEach segment is stored as a monochannel FLAC file with a sample rate of 16k Hz. You can find the segments under the 'audio/' folder,\nwhere groups of segments are bundled into a .URL file for ease of distribution.\n\n\nThe filename of the segment shows which episodes it belongs to and place of it within that episode:\nFor example, here's a filename:\n\n\nwhere\n\n\n* '0061gy0w8' is the episode id\n* '0000' means that it is the first segment of that episode\n* '5664' is the starting sample of this segment. Remember all episodes are sampled at 16k Hz, so the total number of samples\nin an episode is (the duration in seconds \\* 16,000).\n* '81376' is the ending (exclusive) sample of this segment.", "### Metadata\n\n\nMetadata for each episode is stored in the 'URL' file, where each line stores the metadata for one episode:\nHere's the metadata for one of the episodes (split into multiple lines for clarity):\n\n\nwhere\n\n\n* 'title' is the title of the episode\n* 'date' is the date when the episode is published\n* 'view\\_more\\_link' is a link to the associated article/description for this episode.\nMany news episodes have extremely detailed manuscripts written in Traditional Chinese while others have briefer summaries or key points available.\n* 'download\\_link' is the link to download the audio for this episode. It is usually hosted on streamguys but some earlier episodes\nare stored SBS's own server at URL.\n\n\nThe id of each episode appears at the end of its 'view\\_more\\_link'. It appears to be a precomputed hash that is unique to each episode." ]
[ 35, 47, 110, 188, 208 ]
[ "passage: TAGS\n#size_categories-100K<n<1M #language-Yue Chinese #license-cc-by-nc-4.0 #region-us \n### Dataset Description\n\n\n* Curated by: Kevin Li\n* Language(s): Cantonese, English (only in podcasts categorized as \"english\")\n* License: Creative Commons Attribution Non-Commercial 4.0### Scraper\n\n\n* Repository: URL\n\n\nUses\n----\n\n\nEach episode is split into segments using silero-vad.\nSince silero-vad is not trained on Cantonese data, the segmentation is not ideal and often break sentences in the middle.\nHence, this dataset is not intended to be used for supervised ASR. Instead, it is intended to be used for self-supervised\nspeech pretraining, like training WavLM, HuBERT, and Wav2Vec.### Format\n\n\nEach segment is stored as a monochannel FLAC file with a sample rate of 16k Hz. You can find the segments under the 'audio/' folder,\nwhere groups of segments are bundled into a .URL file for ease of distribution.\n\n\nThe filename of the segment shows which episodes it belongs to and place of it within that episode:\nFor example, here's a filename:\n\n\nwhere\n\n\n* '0061gy0w8' is the episode id\n* '0000' means that it is the first segment of that episode\n* '5664' is the starting sample of this segment. Remember all episodes are sampled at 16k Hz, so the total number of samples\nin an episode is (the duration in seconds \\* 16,000).\n* '81376' is the ending (exclusive) sample of this segment." ]
74e213e0bf03bac64497075263f2e48f424ac19b
# Dataset Card for privacy_detection <!-- Provide a quick summary of the dataset. --> This dataset is used for the [Privacy Information Detection in Unstructured Business Text Information](https://www.datafountain.cn/competitions/472) competition and was obtained by preprocessing the original competition dataset.
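The record's metadata describes a token-classification schema: `tokens` plus BIO-style `ner_tags` drawn from labels such as `B-position`, `B-name`, `B-mobile`, and `B-QQ`. The snippet below is a hedged usage sketch, assuming the repository loads through the standard Hugging Face `datasets` API; it relies only on the features listed in that metadata.

```python
# Hedged usage sketch: load the train split and print tokens with their
# decoded NER labels. Assumes standard `datasets` loading works for this repo.
from datasets import load_dataset

ds = load_dataset("gyr66/privacy_detection", split="train")

# ner_tags is a Sequence(ClassLabel); .feature.names maps ids back to strings.
label_names = ds.features["ner_tags"].feature.names

example = ds[0]
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(token, label_names[tag_id])
```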
gyr66/privacy_detection
[ "task_categories:token-classification", "language:zh", "region:us" ]
2023-10-15T12:19:47+00:00
{"language": ["zh"], "task_categories": ["token-classification"], "dataset_info": {"config_name": "privacy_detection", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-position", "2": "I-position", "3": "B-name", "4": "I-name", "5": "B-movie", "6": "I-movie", "7": "B-organization", "8": "I-organization", "9": "B-company", "10": "I-company", "11": "B-book", "12": "I-book", "13": "B-address", "14": "I-address", "15": "B-scene", "16": "I-scene", "17": "B-mobile", "18": "I-mobile", "19": "B-email", "20": "I-email", "21": "B-game", "22": "I-game", "23": "B-government", "24": "I-government", "25": "B-QQ", "26": "I-QQ", "27": "B-vx", "28": "I-vx"}}}}], "splits": [{"name": "train", "num_bytes": 4899635, "num_examples": 2515}], "download_size": 3290405, "dataset_size": 4899635}}
2023-10-17T09:41:59+00:00
[]
[ "zh" ]
TAGS #task_categories-token-classification #language-Chinese #region-us
# Dataset Card for privacy_detection This dataset is used for the Privacy Information Detection in Unstructured Business Text Information competition and was obtained by preprocessing the original competition dataset.
[ "# Dataset Card for privacy_dectection\n\n\n\nThis dataset is used for the Privacy Information Detection in Unstructured Business Text Information competition, and was obtained through preprocessing the original dataset." ]
[ "TAGS\n#task_categories-token-classification #language-Chinese #region-us \n", "# Dataset Card for privacy_dectection\n\n\n\nThis dataset is used for the Privacy Information Detection in Unstructured Business Text Information competition, and was obtained through preprocessing the original dataset." ]
[ 23, 43 ]
[ "passage: TAGS\n#task_categories-token-classification #language-Chinese #region-us \n# Dataset Card for privacy_dectection\n\n\n\nThis dataset is used for the Privacy Information Detection in Unstructured Business Text Information competition, and was obtained through preprocessing the original dataset." ]