Column schema of this dump (type and minimum/maximum length per field):

| column | type | min length | max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | list | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | list | 0 | 25 |
| languages | list | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | list | 0 | 352 |
| processed_texts | list | 1 | 353 |
| tokens_length | list | 1 | 353 |
| input_texts | list | 1 | 40 |
126d3a3e91485f63674e17a83b7f9b4b673c0f75
# Dataset Card for "MedQA_Dutch_translated_with_MariaNMT" Translation of the **English** version of [MedQA](https://huggingface.co/datasets/bigbio/med_qa), to **Dutch** using an [Maria NMT model](https://marian-nmt.github.io/), trained by [Helsinki NLP](https://huggingface.co/Helsinki-NLP/opus-mt-en-nl). Note, for reference: Maria NMT is based on [BART](https://huggingface.co/docs/transformers/model_doc/bart), described [here](https://arxiv.org/abs/1910.13461). Note: We do **not** have the full sample count of the original MedQA due to exceedance of the maximum window size. In updated version we will use stride to translate complete documents. # Attribution If you use this dataset please use the following to credit the creators of MedQA: ```citation @article{jin2021disease, title={What disease does this patient have? a large-scale open domain question answering dataset from medical exams}, author={Jin, Di and Pan, Eileen and Oufattole, Nassim and Weng, Wei-Hung and Fang, Hanyi and Szolovits, Peter}, journal={Applied Sciences}, volume={11}, number={14}, pages={6421}, year={2021}, publisher={MDPI} } ``` The creators of the OPUS-MT models: ``` @InProceedings{TiedemannThottingal:EAMT2020, author = {J{\"o}rg Tiedemann and Santhosh Thottingal}, title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld}, booktitle = {Proceedings of the 22nd Annual Conferenec of the European Association for Machine Translation (EAMT)}, year = {2020}, address = {Lisbon, Portugal} } ``` and ``` @misc {van_es_2023, author = { {Bram van Es} }, title = { MedQA_Dutch_translated_with_MariaNMT (Revision 7e88c9e) }, year = 2023, url = { https://huggingface.co/datasets/UMCU/MedQA_Dutch_translated_with_MariaNMT }, doi = { 10.57967/hf/1355 }, publisher = { Hugging Face } } ``` # License For both the Maria NMT model and the original [Helsinki NLP](https://twitter.com/HelsinkiNLP) [Opus MT model](https://huggingface.co/Helsinki-NLP) we did **not** find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive [CC BY](https://wellcome.org/grant-funding/guidance/open-access-guidance/creative-commons-attribution-licence-cc) license. If this was in error please let us know and we will add the appropriate licensing promptly.
UMCU/MedQA_Dutch_translated_with_MariaNMT
[ "arxiv:1910.13461", "doi:10.57967/hf/1355", "region:us" ]
2023-11-04T00:10:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8270752, "num_examples": 9856}], "download_size": 4467728, "dataset_size": 8270752}}
2023-11-17T10:14:20+00:00
[ "1910.13461" ]
[]
6145b581c63efde6d29917f246c88f47b3595af0
# Dataset Card for "SDv2-GPT4Spatial-2000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Doub7e/SDv2-GPT4Spatial-2000
[ "region:us" ]
2023-11-04T01:13:09+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "T5_last_hidden_states", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 2036667731.0, "num_examples": 2000}], "download_size": 2002909638, "dataset_size": 2036667731.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T02:08:37+00:00
[]
[]
b471bb5451161ded2e025422e37c7e82bdb01744
# Dataset Card for "OCR_redSeal_bias" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DataStudio/OCR_redSeal_bias
[ "region:us" ]
2023-11-04T01:40:32+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "Noise_level", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 736106745.5, "num_examples": 61100}], "download_size": 735968008, "dataset_size": 736106745.5}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T07:26:51+00:00
[]
[]
ada2cee998171df450c9186f9fd9f0e9abdd8873
# Dataset Card for "model_validation_ranked_ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Santp98/model_validation_ranked_ds
[ "region:us" ]
2023-11-04T02:01:37+00:00
{"dataset_info": {"features": [{"name": "rank_1", "dtype": "string"}, {"name": "rank_2", "dtype": "string"}, {"name": "rank_3", "dtype": "string"}, {"name": "rank_4", "dtype": "string"}, {"name": "rank_5", "dtype": "string"}, {"name": "rank_6", "dtype": "string"}, {"name": "rank_7", "dtype": "string"}, {"name": "rank_8", "dtype": "string"}, {"name": "rank_9", "dtype": "string"}, {"name": "rank_10", "dtype": "string"}, {"name": "rank_11", "dtype": "string"}, {"name": "rank_12", "dtype": "string"}, {"name": "rank_13", "dtype": "string"}, {"name": "rank_14", "dtype": "string"}, {"name": "rank_15", "dtype": "string"}, {"name": "rank_16", "dtype": "string"}, {"name": "rank_17", "dtype": "string"}, {"name": "rank_18", "dtype": "string"}, {"name": "rank_19", "dtype": "string"}, {"name": "rank_20", "dtype": "string"}, {"name": "rank_21", "dtype": "string"}, {"name": "rank_22", "dtype": "string"}, {"name": "rank_23", "dtype": "string"}, {"name": "rank_24", "dtype": "string"}, {"name": "rank_25", "dtype": "string"}, {"name": "rank_26", "dtype": "string"}, {"name": "rank_27", "dtype": "string"}, {"name": "rank_28", "dtype": "string"}, {"name": "rank_29", "dtype": "string"}, {"name": "rank_30", "dtype": "string"}, {"name": "rank_31", "dtype": "string"}, {"name": "rank_32", "dtype": "string"}, {"name": "rank_33", "dtype": "string"}, {"name": "rank_34", "dtype": "string"}, {"name": "rank_35", "dtype": "string"}, {"name": "rank_36", "dtype": "string"}, {"name": "rank_37", "dtype": "string"}, {"name": "rank_38", "dtype": "string"}, {"name": "rank_39", "dtype": "string"}, {"name": "rank_40", "dtype": "string"}, {"name": "rank_41", "dtype": "string"}, {"name": "rank_42", "dtype": "string"}, {"name": "rank_43", "dtype": "string"}, {"name": "rank_44", "dtype": "string"}, {"name": "rank_45", "dtype": "string"}, {"name": "rank_46", "dtype": "string"}, {"name": "rank_47", "dtype": "string"}, {"name": "rank_48", "dtype": "string"}, {"name": "rank_49", "dtype": "string"}, {"name": "rank_50", "dtype": "string"}, {"name": "rank_51", "dtype": "string"}, {"name": "rank_52", "dtype": "string"}, {"name": "rank_53", "dtype": "string"}, {"name": "rank_54", "dtype": "string"}, {"name": "rank_55", "dtype": "string"}, {"name": "rank_56", "dtype": "string"}, {"name": "rank_57", "dtype": "string"}, {"name": "rank_58", "dtype": "string"}, {"name": "rank_59", "dtype": "string"}, {"name": "rank_60", "dtype": "string"}, {"name": "rank_61", "dtype": "string"}, {"name": "rank_62", "dtype": "string"}, {"name": "rank_63", "dtype": "string"}, {"name": "rank_64", "dtype": "string"}, {"name": "rank_65", "dtype": "string"}, {"name": "rank_66", "dtype": "string"}, {"name": "rank_67", "dtype": "string"}, {"name": "rank_68", "dtype": "string"}, {"name": "rank_69", "dtype": "string"}, {"name": "rank_70", "dtype": "string"}, {"name": "rank_71", "dtype": "string"}, {"name": "rank_72", "dtype": "string"}, {"name": "rank_73", "dtype": "string"}, {"name": "rank_74", "dtype": "string"}, {"name": "rank_75", "dtype": "string"}, {"name": "rank_76", "dtype": "string"}, {"name": "rank_77", "dtype": "string"}, {"name": "rank_78", "dtype": "string"}, {"name": "rank_79", "dtype": "string"}, {"name": "rank_80", "dtype": "string"}, {"name": "rank_81", "dtype": "string"}, {"name": "rank_82", "dtype": "string"}, {"name": "rank_83", "dtype": "string"}, {"name": "rank_84", "dtype": "string"}, {"name": "rank_85", "dtype": "string"}, {"name": "rank_86", "dtype": "string"}, {"name": "rank_87", "dtype": "string"}, {"name": "rank_88", "dtype": "string"}, {"name": 
"rank_89", "dtype": "string"}, {"name": "rank_90", "dtype": "string"}, {"name": "rank_91", "dtype": "string"}, {"name": "rank_92", "dtype": "string"}, {"name": "rank_93", "dtype": "string"}, {"name": "rank_94", "dtype": "string"}, {"name": "rank_95", "dtype": "string"}, {"name": "rank_96", "dtype": "string"}, {"name": "rank_97", "dtype": "string"}, {"name": "rank_98", "dtype": "string"}, {"name": "rank_99", "dtype": "string"}, {"name": "rank_100", "dtype": "string"}, {"name": "generated_queries", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 820598, "num_examples": 500}], "download_size": 308559, "dataset_size": 820598}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T02:01:38+00:00
[]
[]
3f3fbc8750e92917db9c177f7ddb91fefa55e8bb
<div align="center"> <img width="640" alt="aghent/Aerial-Semantic-Segmentation-Cactis" src="https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['copiapoa', 'copiapoa-v2'] ``` ### Number of Images ```json {'valid': 1060, 'test': 1013, 'train': 8028} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("aghent/Aerial-Semantic-Segmentation-Cactis", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep/dataset/1](https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ instance-segmentation-kgvep_dataset, title = { Instance Segmentation Dataset }, type = { Open Source Dataset }, author = { UAI }, howpublished = { \\url{ https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep } }, url = { https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2023 }, month = { nov }, note = { visited on 2023-11-04 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on November 4, 2023 at 2:50 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand and search unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time For state of the art Computer Vision training notebooks you can use with this dataset, visit https://github.com/roboflow/notebooks To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com The dataset includes 10101 images. Cactis are annotated in COCO format. The following pre-processing was applied to each image: No image augmentation techniques were applied.
aghent/Aerial-Semantic-Segmentation-Cactis
[ "task_categories:image-segmentation", "roboflow", "roboflow2huggingface", "region:us" ]
2023-11-04T02:48:23+00:00
{"task_categories": ["image-segmentation"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-11-04T02:52:16+00:00
[]
[]
22f5756d31173817c66eb513f3e3c3c2b13f20e4
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. 
## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
pgurazada1/tesla-qna-feedback-logs
[ "region:us" ]
2023-11-04T03:16:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data.csv"}]}]}
2024-02-17T07:40:36+00:00
[]
[]
051494c82b674f80a585f289071880a6ad884333
**Motivation for Dataset Creation** - *Why was the dataset created? (e.g., were there specific tasks in mind, or a specific gap that needed to be filled?)* This dataset was created to help advance the field of intelligent music production, specifically targeting music mixing in a digital audio workstation (DAW). - *What (other) tasks could the dataset be used for? Are there obvious tasks for which it should not be used?* This dataset could possibly be used to predict parameter values via the semantic labels provided by the mixes' listening evaluations. - *Has the dataset been used for any tasks already? If so, where are the results so others can compare (e.g., links to published papers)?* Currently, this dataset is still being curated and has yet to be used for any task. This will be updated once that changes. - *Who funded the creation of the dataset? If there is an associated grant, provide the grant number.* The National Science Foundation Graduate Research Fellowship Program (Award Abstract # 1650114) helped fund the creation of this dataset by financially supporting the creator through their graduate program. - *Any other comments?* **Dataset Composition** - *What are the instances? (that is, examples; e.g., documents, images, people, countries) Are there multiple types of instances? (e.g., movies, users, ratings; people, interactions between them; nodes, edges)* The instances themselves are annotations of individual mixes from either Logic Pro, Pro Tools, or Reaper, depending on the artist who mixed them. - *Are relationships between instances made explicit in the data (e.g., social network links, user/movie ratings, etc.)?* Each mix is unique, and there is no evident relationship between them. - *How many instances of each type are there?* There will be 114 mixes once this dataset is finalized. - *What data does each instance consist of? "Raw" data (e.g., unprocessed text or images)? Features/attributes? Is there a label/target associated with instances? If the instances are related to people, are subpopulations identified (e.g., by age, gender, etc.) and what is their distribution?* Each instance of a mix contains the following: Mix Name, Song Name, Artist Name, Genre, Tracks, Track Name, Track Type, Track Audio Path, Channel Mode, Parameters, Gain, Pan, etc. - *Is everything included or does the data rely on external resources? (e.g., websites, tweets, datasets) If external resources, a) are there guarantees that they will exist, and remain constant, over time; b) is there an official archival version? Are there licenses, fees or rights associated with any of the data?* The audio that is associated with each mix is an external resource, as those audio files are original to their source. The original audio sources are from The Mixing Secrets, Weathervane, or The Open Multitrack Testbed. - *Are there recommended data splits or evaluation measures? (e.g., training, development, testing; accuracy/AUC)* There are no recommended data splits for this dataset. However, if no listening evaluation is available for a given mix, we recommend leaving that mix out if you plan on using the evaluation comments for the semantic representation of the mix. All of the mixes that were annotated from Mike Senior's The Mixing Secrets projects for Sound on Sound do not contain any listening evaluation. - *What experiments were initially run on this dataset? 
Have a summary of those results and, if available, provide the link to a paper with more information here.* No experiments have been run on this dataset as of yet. - *Any other comments?* **Data Collection Process** - *How was the data collected? (e.g., hardware apparatus/sensor, manual human curation, software program, software interface/API; how were these constructs/measures/methods validated?)* The data was collected manually by annotating parameter values for each track in the mix. The mix projects were provided as Logic Pro, Pro Tools, or Reaper files. Each project was opened in its respective software, and the author went through each track and annotated these parameters manually. A tool was created to help assemble this dataset for parameter values that plugin manufacturers obscured. This tool estimated the value of each parameter based on the visual representation provided in the plugin. - *Who was involved in the data collection process? (e.g., students, crowdworkers) How were they compensated? (e.g., how much were crowdworkers paid?)* The author of this dataset collected the data and is a graduate student at the University of Utah. - *Over what time-frame was the data collected? Does the collection time-frame match the creation time-frame?* The data was collected from September through November of 2023. The creation time frame overlaps the collection time frame, as the main structure for the dataset was created first and mixes are added iteratively. - *How was the data associated with each instance acquired? Was the data directly observable (e.g., raw text, movie ratings), reported by subjects (e.g., survey responses), or indirectly inferred/derived from other data (e.g., part of speech tags; model-based guesses for age or language)? If the latter two, were they validated/verified and if so how?* The data were directly observable: the parameter values are visually represented in each session file for the mixes. - *Does the dataset contain all possible instances? Or is it, for instance, a sample (not necessarily random) from a larger set of instances?* The dataset contains all possible instances that were given by The Mix Evaluation Dataset, excluding the copyrighted songs that were used in the listening evaluation. - *If the dataset is a sample, then what is the population? What was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)? Is the sample representative of the larger set (e.g., geographic coverage)? If not, why not (e.g., to cover a more diverse range of instances)? How does this affect possible uses?* This dataset does not represent a sample of a larger population, so these sampling questions are not applicable in this case. - *Is there information missing from the dataset and why? (this does not include intentionally dropped instances; it might include, e.g., redacted text, withheld documents) Is this data missing because it was unavailable?* Not all of the parameter values for every plugin used were documented. Occasionally a mix would include a saturator or a multiband compressor; due to the low occurrence of these plugins, they were omitted from the annotation process. - *Are there any known errors, sources of noise, or redundancies in the data?* To the author's knowledge, there are no errors or sources of noise within this dataset. - *Any other comments?* **Data Preprocessing** - *What preprocessing/cleaning was done? 
(e.g., discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values, etc.)* The data preprocessing happened during the data collection stage for this dataset. Some of the data values were not available from the plugins that were used in a DAW session file. To help estimate the values of the parameters for each such plugin, a tool was created and used by this author. If there was no value for a parameter, the value was omitted from the data collection. - *Was the "raw" data saved in addition to the preprocessed/cleaned data? (e.g., to support unanticipated future uses)* The raw data is still saved in the project files but was not annotated and, therefore, is not contained in this dataset. For the raw files of each mix, the reader should explore The Mix Evaluation Dataset for these values. - *Is the preprocessing software available?* The tool that was used to help the author annotate some of the parameter values is available for download [here](https://github.com/mclemcrew/MixologyDB). - *Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?* The authors of this dataset intended to create an ethically sourced repository for AI music researchers to use for music mixing. We believe that by using The Mix Evaluation Dataset along with publicly available music mixing projects, we have achieved our goal. Although this dataset is considerably smaller than what is required for most model architectures utilized in generative AI applications, we hope this is a positive addition to the field. - *Any other comments?* **Dataset Distribution** - *How is the dataset distributed? (e.g., website, API, etc.; does the data have a DOI; is it archived redundantly?)* This dataset is distributed via HuggingFace and will continue to be hosted there for the foreseeable future. There are no current plans to create an API, although a website for the dataset has been mentioned. The data is currently being archived redundantly through the University of Utah's Box account. Should HuggingFace go down or remove the dataset, the data themselves will remain at the University of Utah and will be uploaded to a separate website. - *When will the dataset be released/first distributed? (Is there a canonical paper/reference for this dataset?)* The dataset, in its entirety, will be released on December 5th, 2023. - *What license (if any) is it distributed under? Are there any copyrights on the data?* The dataset is distributed under the MIT license. There are no copyrights on this data. - *Are there any fees or access/export restrictions?* There are no fees or access/export restrictions for this dataset. - *Any other comments?* **Dataset Maintenance** - *Who is supporting/hosting/maintaining the dataset? How does one contact the owner/curator/manager of the dataset (e.g. email address, or other contact info)?* HuggingFace is currently hosting the dataset, and Michael Clemens (email: michael.clemens at utah.edu) is maintaining the dataset. - *Will the dataset be updated? How often and by whom? How will updates/revisions be documented and communicated (e.g., mailing list, GitHub)? Is there an erratum?* The release of this dataset is set to be ***December 5th, 2023***. Updates and revisions will be documented in the HuggingFace repository. 
There is currently no erratum, but should errata arise, they will be documented here. - *If the dataset becomes obsolete how will this be communicated?* Should the dataset no longer be valid, this will be communicated through the ReadMe right here on HF. - *Is there a repository to link to any/all papers/systems that use this dataset?* There is no repo or link to any papers/systems that use the dataset. Should this dataset be used in the future for papers or system design, there will be a link to these works on this ReadMe, or a website will be created and linked here for the collection of works. - *If others want to extend/augment/build on this dataset, is there a mechanism for them to do so? If so, is there a process for tracking/assessing the quality of those contributions? What is the process for communicating/distributing these contributions to users?* This dataset is an extension of The Mix Evaluation Dataset by Brecht De Man et al., and users are free to extend/augment/build on this dataset. There is currently no trackable way of assessing these contributions. - *Any other comments?* **Legal & Ethical Considerations** - *If the dataset relates to people (e.g., their attributes) or was generated by people, were they informed about the data collection? (e.g., datasets that collect writing, photos, interactions, transactions, etc.)* As this was a derivative of another work that performed the main data collection, the original music producers who mixed these tracks were not informed of the creation of this dataset. - *If it relates to other ethically protected subjects, have appropriate obligations been met? (e.g., medical data might include information collected from animals)* N/A - *If it relates to people, were there any ethical review applications/reviews/approvals? (e.g. Institutional Review Board applications)* As this is an extension of the main dataset by Brecht De Man et al. and the data collection had already been conducted, an IRB was not included in the creation of this dataset. The data themselves are not related to the music producers but instead remain as an artifact of their work. Due to the nature of these data, an IRB was not needed. - *If it relates to people, were they told what the dataset would be used for and did they consent? What community norms exist for data collected from human communications? If consent was obtained, how? Were the people provided with any mechanism to revoke their consent in the future or for certain uses?* N/A - *If it relates to people, could this dataset expose people to harm or legal action? (e.g., financial, social, or otherwise) What was done to mitigate or reduce the potential for harm?* The main initiative of this work was to create a dataset that was ethically sourced for parameter recommendations in the music-mixing process. With this, all of the data found here has been gathered from publicly available data from artists; therefore, no copyright or fair-use infringement exists. - *If it relates to people, does it unfairly advantage or disadvantage a particular social group? In what ways? How was this mitigated? If it relates to people, were they provided with privacy guarantees? If so, what guarantees and how are these ensured?* N/A - *Does the dataset comply with the EU General Data Protection Regulation (GDPR)? Does it comply with any other standards, such as the US Equal Employment Opportunity Act? Does the dataset contain information that might be considered sensitive or confidential? 
(e.g., personally identifying information)* To the authors' knowledge, this dataset complies with the laws mentioned above. - *Does the dataset contain information that might be considered inappropriate or offensive?* No, this dataset does not contain any such information. - *Any other comments?*
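The datasheet above lists the per-mix fields (Mix Name, Song Name, Artist Name, Genre, and per-track Name, Type, Audio Path, Channel Mode, Parameters, Gain, Pan). A purely hypothetical sketch of a record type mirroring that description follows; the field names and types are illustrative assumptions, not the dataset's actual schema.

```python
# Hypothetical sketch only: a record type mirroring the per-mix fields the
# datasheet describes. Names and types are illustrative assumptions.
from dataclasses import dataclass, field

@dataclass
class Track:
    name: str
    track_type: str          # e.g. "audio" or "bus" (assumed values)
    audio_path: str
    channel_mode: str        # e.g. "mono" / "stereo" (assumed values)
    gain_db: float
    pan: float
    parameters: dict = field(default_factory=dict)  # plugin parameter values

@dataclass
class Mix:
    mix_name: str
    song_name: str
    artist_name: str
    genre: str
    tracks: list[Track] = field(default_factory=list)
```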
mclemcrew/MixologyDB
[ "size_categories:n<1K", "language:en", "license:mit", "music", "region:us" ]
2023-11-04T03:27:57+00:00
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "tags": ["music"]}
2023-11-04T14:38:36+00:00
[]
[ "en" ]
TAGS #size_categories-n<1K #language-English #license-mit #music #region-us
Motivation for Dataset Creation - *Why was the dataset created? (e.g., were there specific tasks in mind, or a specific gap that needed to be filled?)* This dataset was created to help advance the field of intelligent music production, specifically targeting music mixing in a digital audio workstation (DAW). - *What (other) tasks could the dataset be used for? Are there obvious tasks for which it should not be used?* This dataset could possibly be used to predict parameter values via semantic labels provided by the mixed listening evaluations. - *Has the dataset been used for any tasks already? If so, where are the results so others can compare (e.g., links to published papers)?* Currently, this dataset is still being curated and has yet to be used for any task. This will be updated once that has changed. - *Who funded the creation of the dataset? If there is an associated grant, provide the grant number.* The National Science Foundation Graduate Research Fellowship Program (Award Abstract # 1650114) helped to financially support the creation of this dataset by helping financially support the creator through their graduate program. - *Any other comments?* Dataset Composition - *What are the instances? (that is, examples; e.g., documents, images, people, countries) Are there multiple types of instances? (e.g., movies, users, ratings; people, interactions between them; nodes, edges)* The instances themselves are annotated of individual mixes either from Logic Pro, Pro Tools, or Reaper, depending on the artist who mixed them. - *Are relationships between instances made explicit in the data (e.g., social network links, user/movie ratings, etc.)?* Each mix is unique to the other, and there exists no evident relationship between them. - *How many instances of each type are there?* There will be 114 mixes once this dataset is finalized. - *What data does each instance consist of? "Raw" data (e.g., unprocessed text or images)? Features/attributes? Is there a label/target associated with instances? If the instances are related to people, are subpopulations identified (e.g., by age, gender, etc.) and what is their distribution?* Each instance of a mix contains the following: Mix Name, Song Name, Artist Name, Genre, Tracks, Track Name, Track Type, Track Audio Path, Channel Mode, Parameters, Gain, Pan, (Etc) - *Is everything included or does the data rely on external resources? (e.g., websites, tweets, datasets) If external resources, a) are there guarantees that they will exist, and remain constant, over time; b) is there an official archival version. Are there licenses, fees or rights associated with any of the data?* The audio that is associated with each mix is an external resource, as those audio files are original to their source. The original audio sources are from The Mixing Secrets, Weathervane, or The Open Multitrack Testbed. - *Are there recommended data splits or evaluation measures? (e.g., training, development, testing; accuracy/AUC)* There are no data splits recommended for this. However, suppose no listening evaluation is available for that current mix. In that case, we recommend leaving out that mix if you plan on using those comments for the semantic representation of the mix. All of the mixes that were annotated from Mike Senior's The Mixing Secret projects for sound on sound do not contain any listening evaluation. - *What experiments were initially run on this dataset? 
Have a summary of those results and, if available, provide the link to a paper with more information here.* No experiments have been run on this dataset as of yet. - Any other comments? Data Collection Process - *How was the data collected? (e.g., hardware apparatus/sensor, manual human curation, software program, software interface/API; how were these constructs/measures/methods validated?)* The data was collected manually by annotating parameter values for each track in the mix. The mix projects were provided as Logic Pro, Pro Tools, or Reaper files. Each project was opened in their respective software and the author went through each track and annotated these parameters manually. A tool was created to help assemble this dataset for parameter values that plugin manufacturers obscured. This tool estimated the value of each parameter based on the visual representation that was provided in the plugin. - *Who was involved in the data collection process? (e.g., students, crowdworkers) How were they compensated? (e.g., how much were crowdworkers paid?)* The author of this dataset collected the data and is a graduate student at the University of Utah. - *Over what time-frame was the data collected? Does the collection time-frame match the creation time-frame?* This dataset has been collected from September through November of 2023. The creation time frame overlaps the collection time frame as the main structure for the dataset was created, and mixes are added iteratively. - *How was the data associated with each instance acquired? Was the data directly observable (e.g., raw text, movie ratings), reported by subjects (e.g., survey responses), or indirectly inferred/derived from other data (e.g., part of speech tags; model-based guesses for age or language)? If the latter two, were they validated/verified and if so how?* The data were directly associated with each instance. The parameter values are visually represented in each session file for the mixes. - *Does the dataset contain all possible instances? Or is it, for instance, a sample (not necessarily random) from a larger set of instances?* The dataset contains all possible instances that were given by The Mix Evaluation Dataset, negating the copyrighted songs that were used in the listening evaluation. - *If the dataset is a sample, then what is the population? What was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)? Is the sample representative of the larger set (e.g., geographic coverage)? If not, why not (e.g., to cover a more diverse range of instances)? How does this affect possible uses?* This dataset does not represent a sample of a larger population and thus, a sample size is not appropriate in this case. - *Is there information missing from the dataset and why? (this does not include intentionally dropped instances; it might include, e.g., redacted text, withheld documents) Is this data missing because it was unavailable?* Not all of the parameter values for every plugin used were documented. Occasionally a mix would include a saturator or a multiband compressor. Due to the low occurrence of these plugins, these were omitted for the annotating process. - *Are there any known errors, sources of noise, or redundancies in the data?* To the author's knowledge, there are no errors or sources of noise within this dataset. - *Any other comments?* Data Preprocessing - *What preprocessing/cleaning was done? 
(e.g., discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values, etc.)* The data preprocessing happened during the data collection stage for this dataset. Some of the data values were not available from the plugins that were used in a DAW session file. To help estimate the values on each of the parameters for that respective plugin, a tool was created and used by this author. If there wasn't a value for the parameter, the value was omitted from the data collection. - *Was the "raw" data saved in addition to the preprocessed/cleaned data? (e.g., to support unanticipated future uses)* The raw data is still saved in the project files but was not annotated and, therefore, is not contained in this dataset. For the raw files of each mix, the reader should explore The Mix Evaluation dataset for these values. - *Is the preprocessing software available?* The tool that was used to help the author annotate some of the parameter values is available for download here - *Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?* The authors of this dataset intended to create an ethical source repository for AI music researchers to use for music mixing. We believe by using The Mix Evaluation dataset along with publically available music mixing projects, we have achieved our goal. Although this dataset is considerably smaller than what is required for most model architectures utilized in generative AI applications, we hope this is a positive addition to the field. - *Any other comments?* Dataset Distribution - *How is the dataset distributed? (e.g., website, API, etc.; does the data have a DOI; is it archived redundantly?)* This dataset is distributed via HuggingFace and will continue to be hosted there for the foreseeable future. There are no current plans to create an API, although a website for the dataset has been mentioned. The data is currently being archived redundantly through the University of Utah's Box account. Should HuggingFace go down or remove the dataset, the data themselves will remain at the University of Utah and will be uploaded to a separate website. - *When will the dataset be released/first distributed? (Is there a canonical paper/reference for this dataset?)* The dataset, in its entirety, will be released on December 5th, 2023. - *What license (if any) is it distributed under? Are there any copyrights on the data?* The license will be distributed via the MIT license. There are no copyrights on this data. - *Are there any fees or access/export restrictions?* There are no fees or access/export restrictions for this dataset. - *Any other comments?* Dataset Maintenance - *Who is supporting/hosting/maintaining the dataset? How does one contact the owner/curator/manager of the dataset (e.g. email address, or other contact info)?* HuggingFace is currently hosting the dataset and Michael Clemens (email: michael.clemens at URL) is maintaining the dataset. - *Will the dataset be updated? How often and by whom? How will updates/revisions be documented and communicated (e.g., mailing list, GitHub)? Is there an erratum?* The release of this dataset is set to be *December 5th, 2023*. Updates and revisions will be documented through the repository through HuggingFace. There is currently no erratum, but should that be the case, this will be documented here as they come about. 
- *If the dataset becomes obsolete how will this be communicated?* Should the dataset no longer be valid, this will be communicated through the ReadMe right here on HF. - *Is there a repository to link to any/all papers/systems that use this dataset?* There is no repo or link to any paper/systems that use the dataset. Should this dataset be used in the future for papers or system design, there will be a link to these works on this ReadMe, or a website will be created and linked here for the collection of works. - *If others want to extend/augment/build on this dataset, is there a mechanism for them to do so? If so, is there a process for tracking/assessing the quality of those contributions. What is the process for communicating/distributing these contributions to users?* This dataset is an extension of The Mix Evaluation Dataset by Brecht De Man et al., and users are free to extend/augment/build on this dataset. There is no trackable way currently of assessing these contributions. - - *Any other comments?* Legal & Ethical Considerations - *If the dataset relates to people (e.g., their attributes) or was generated by people, were they informed about the data collection? (e.g., datasets that collect writing, photos, interactions, transactions, etc.)* As this was a derivative of another work that performed the main data collection, the original music producers who mixed these tracks were not informed of the creation of this dataset. - *If it relates to other ethically protected subjects, have appropriate obligations been met? (e.g., medical data might include information collected from animals)* N/A - *If it relates to people, were there any ethical review applications/reviews/approvals? (e.g. Institutional Review Board applications)* As this is an extension of the main dataset by Brecht De Man et al. and the data collection had already been conducted, an IRB was not included in this creation of this dataset. The data themselves are not related to the music producers but instead remain as an artifact of their work. Due to the nature of these data, an IRB was not needed. - *If it relates to people, were they told what the dataset would be used for and did they consent? What community norms exist for data collected from human communications? If consent was obtained, how? Were the people provided with any mechanism to revoke their consent in the future or for certain uses?* N/A - *If it relates to people, could this dataset expose people to harm or legal action? (e.g., financial social or otherwise) What was done to mitigate or reduce the potential for harm?* The main initiative of this work was to create a dataset that was ethically sourced for parameter recommendations in the music-mixing process. With this, all of the data found here has been gathered from publically avaiable data from artists. Therefore no copyright or fair use infringement exists. - *If it relates to people, does it unfairly advantage or disadvantage a particular social group? In what ways? How was this mitigated? If it relates to people, were they provided with privacy guarantees? If so, what guarantees and how are these ensured?* N/A - *Does the dataset comply with the EU General Data Protection Regulation (GDPR)? Does it comply with any other standards, such as the US Equal Employment Opportunity Act? Does the dataset contain information that might be considered sensitive or confidential? (e.g., personally identifying information)* To the authors' knowledge, this dataset complies with the laws mentioned above. 
- *Does the dataset contain information that might be considered inappropriate or offensive?* No, this dataset does not contain any such information. - *Any other comments?*
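The datasheet above states that the dataset is distributed via HuggingFace under the MIT license, so it should load with the standard `datasets` library. A minimal sketch follows; the repository id is a hypothetical placeholder (the excerpt above does not spell out the exact repo name), and the exact column names and casing may differ from the datasheet's field list:

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the real HuggingFace repo
# for this mix-annotation dataset.
mixes = load_dataset("your-namespace/mix-annotations", split="train")

# Field names should follow the datasheet (Mix Name, Song Name, Genre,
# Tracks, Gain, Pan, ...), though exact keys may differ in the release.
print(mixes[0].keys())
```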
[ "# 1650114) helped to financially support the creation of this dataset by helping financially support the creator through their graduate program.\n\n- *Any other comments?* \n\nDataset Composition\n- *What are the instances? (that is, examples; e.g., documents, images, people, countries) Are there multiple types of instances? (e.g., movies, users, ratings; people, interactions between them; nodes, edges)*\nThe instances themselves are annotated of individual mixes either from Logic Pro, Pro Tools, or Reaper, depending on the artist who mixed them.\n\n- *Are relationships between instances made explicit in the data (e.g., social network links, user/movie ratings, etc.)?*\nEach mix is unique to the other, and there exists no evident relationship between them.\n\n- *How many instances of each type are there?*\nThere will be 114 mixes once this dataset is finalized.\n\n- *What data does each instance consist of? \"Raw\" data (e.g., unprocessed text or images)? Features/attributes? Is there a label/target associated with instances? If the instances are related to people, are subpopulations identified (e.g., by age, gender, etc.) and what is their distribution?*\nEach instance of a mix contains the following: Mix Name, Song Name, Artist Name, Genre, Tracks, Track Name, Track Type, Track Audio Path, Channel Mode, Parameters, Gain, Pan, (Etc)\n\n- *Is everything included or does the data rely on external resources? (e.g., websites, tweets, datasets) If external resources, a) are there guarantees that they will exist, and remain constant, over time; b) is there an official archival version. Are there licenses, fees or rights associated with any of the data?*\nThe audio that is associated with each mix is an external resource, as those audio files are original to their source. The original audio sources are from The Mixing Secrets, Weathervane, or The Open Multitrack Testbed.\n\n- *Are there recommended data splits or evaluation measures? (e.g., training, development, testing; accuracy/AUC)*\nThere are no data splits recommended for this. However, suppose no listening evaluation is available for that current mix. In that case, we recommend leaving out that mix if you plan on using those comments for the semantic representation of the mix. All of the mixes that were annotated from Mike Senior's The Mixing Secret projects for sound on sound do not contain any listening evaluation.\n\n- *What experiments were initially run on this dataset? Have a summary of those results and, if available, provide the link to a paper with more information here.*\nNo experiments have been run on this dataset as of yet.\n\n- Any other comments? \n\nData Collection Process \n- *How was the data collected? (e.g., hardware apparatus/sensor, manual human curation, software program, software interface/API; how were these constructs/measures/methods validated?)*\nThe data was collected manually by annotating parameter values for each track in the mix. The mix projects were provided as Logic Pro, Pro Tools, or Reaper files. Each project was opened in their respective software and the author went through each track and annotated these parameters manually. A tool was created to help assemble this dataset for parameter values that plugin manufacturers obscured. This tool estimated the value of each parameter based on the visual representation that was provided in the plugin.\n\n- *Who was involved in the data collection process? (e.g., students, crowdworkers) How were they compensated? 
(e.g., how much were crowdworkers paid?)*\nThe author of this dataset collected the data and is a graduate student at the University of Utah.\n\n- *Over what time-frame was the data collected? Does the collection time-frame match the creation time-frame?*\nThis dataset has been collected from September through November of 2023. The creation time frame overlaps the collection time frame as the main structure for the dataset was created, and mixes are added iteratively.\n\n- *How was the data associated with each instance acquired? Was the data directly observable (e.g., raw text, movie ratings), reported by subjects (e.g., survey responses), or indirectly inferred/derived from other data (e.g., part of speech tags; model-based guesses for age or language)? If the latter two, were they validated/verified and if so how?*\nThe data were directly associated with each instance. The parameter values are visually represented in each session file for the mixes.\n\n- *Does the dataset contain all possible instances? Or is it, for instance, a sample (not necessarily random) from a larger set of instances?*\nThe dataset contains all possible instances that were given by The Mix Evaluation Dataset, negating the copyrighted songs that were used in the listening evaluation.\n\n- *If the dataset is a sample, then what is the population? What was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)? Is the sample representative of the larger set (e.g., geographic coverage)? If not, why not (e.g., to cover a more diverse range of instances)? How does this affect possible uses?*\nThis dataset does not represent a sample of a larger population and thus, a sample size is not appropriate in this case.\n\n- *Is there information missing from the dataset and why? (this does not include intentionally dropped instances; it might include, e.g., redacted text, withheld documents) Is this data missing because it was unavailable?*\nNot all of the parameter values for every plugin used were documented. Occasionally a mix would include a saturator or a multiband compressor. Due to the low occurrence of these plugins, these were omitted for the annotating process.\n\n- *Are there any known errors, sources of noise, or redundancies in the data?*\nTo the author's knowledge, there are no errors or sources of noise within this dataset.\n\n- *Any other comments?*\n\nData Preprocessing \n- *What preprocessing/cleaning was done? (e.g., discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values, etc.)*\nThe data preprocessing happened during the data collection stage for this dataset. Some of the data values were not available from the plugins that were used in a DAW session file. To help estimate the values on each of the parameters for that respective plugin, a tool was created and used by this author. If there wasn't a value for the parameter, the value was omitted from the data collection.\n\n- *Was the \"raw\" data saved in addition to the preprocessed/cleaned data? (e.g., to support unanticipated future uses)*\nThe raw data is still saved in the project files but was not annotated and, therefore, is not contained in this dataset. 
For the raw files of each mix, the reader should explore The Mix Evaluation dataset for these values.\n\n- *Is the preprocessing software available?*\nThe tool that was used to help the author annotate some of the parameter values is available for download here\n\n- *Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?*\nThe authors of this dataset intended to create an ethical source repository for AI music researchers to use for music mixing. We believe by using The Mix Evaluation dataset along with publically available music mixing projects, we have achieved our goal. Although this dataset is considerably smaller than what is required for most model architectures utilized in generative AI applications, we hope this is a positive addition to the field.\n\n- *Any other comments?*\n\nDataset Distribution \n- *How is the dataset distributed? (e.g., website, API, etc.; does the data have a DOI; is it archived redundantly?)*\nThis dataset is distributed via HuggingFace and will continue to be hosted there for the foreseeable future. There are no current plans to create an API, although a website for the dataset has been mentioned. The data is currently being archived redundantly through the University of Utah's Box account. Should HuggingFace go down or remove the dataset, the data themselves will remain at the University of Utah and will be uploaded to a separate website.\n\n- *When will the dataset be released/first distributed? (Is there a canonical paper/reference for this dataset?)*\nThe dataset, in its entirety, will be released on December 5th, 2023.\n\n- *What license (if any) is it distributed under? Are there any copyrights on the data?*\nThe license will be distributed via the MIT license. There are no copyrights on this data.\n\n- *Are there any fees or access/export restrictions?*\nThere are no fees or access/export restrictions for this dataset.\n\n- *Any other comments?*\n\nDataset Maintenance \n- *Who is supporting/hosting/maintaining the dataset? How does one contact the owner/curator/manager of the dataset (e.g. email address, or other contact info)?*\nHuggingFace is currently hosting the dataset and Michael Clemens (email: michael.clemens at URL) is maintaining the dataset.\n\n- *Will the dataset be updated? How often and by whom? How will updates/revisions be documented and communicated (e.g., mailing list, GitHub)? Is there an erratum?*\nThe release of this dataset is set to be *December 5th, 2023*. Updates and revisions will be documented through the repository through HuggingFace. There is currently no erratum, but should that be the case, this will be documented here as they come about.\n\n- *If the dataset becomes obsolete how will this be communicated?*\nShould the dataset no longer be valid, this will be communicated through the ReadMe right here on HF.\n \n- *Is there a repository to link to any/all papers/systems that use this dataset?*\nThere is no repo or link to any paper/systems that use the dataset. Should this dataset be used in the future for papers or system design, there will be a link to these works on this ReadMe, or a website will be created and linked here for the collection of works.\n\n- *If others want to extend/augment/build on this dataset, is there a mechanism for them to do so? If so, is there a process for tracking/assessing the quality of those contributions. 
What is the process for communicating/distributing these contributions to users?*\nThis dataset is an extension of The Mix Evaluation Dataset by Brecht De Man et al., and users are free to extend/augment/build on this dataset. There is no trackable way currently of assessing these contributions.\n- \n- *Any other comments?*\n\nLegal & Ethical Considerations\n- *If the dataset relates to people (e.g., their attributes) or was generated by people, were they informed about the data collection? (e.g., datasets that collect writing, photos, interactions, transactions, etc.)*\nAs this was a derivative of another work that performed the main data collection, the original music producers who mixed these tracks were not informed of the creation of this dataset.\n\n- *If it relates to other ethically protected subjects, have appropriate obligations been met? (e.g., medical data might include information collected from animals)*\nN/A\n\n- *If it relates to people, were there any ethical review applications/reviews/approvals? (e.g. Institutional Review Board applications)*\nAs this is an extension of the main dataset by Brecht De Man et al. and the data collection had already been conducted, an IRB was not included in this creation of this dataset. The data themselves are not related to the music producers but instead remain as an artifact of their work. Due to the nature of these data, an IRB was not needed.\n \n- *If it relates to people, were they told what the dataset would be used for and did they consent? What community norms exist for data collected from human communications? If consent was obtained, how? Were the people provided with any mechanism to revoke their consent in the future or for certain uses?*\nN/A\n\n- *If it relates to people, could this dataset expose people to harm or legal action? (e.g., financial social or otherwise) What was done to mitigate or reduce the potential for harm?*\nThe main initiative of this work was to create a dataset that was ethically sourced for parameter recommendations in the music-mixing process. With this, all of the data found here has been gathered from publically avaiable data from artists. Therefore no copyright or fair use infringement exists.\n \n- *If it relates to people, does it unfairly advantage or disadvantage a particular social group? In what ways? How was this mitigated? If it relates to people, were they provided with privacy guarantees? If so, what guarantees and how are these ensured?*\nN/A\n\n- *Does the dataset comply with the EU General Data Protection Regulation (GDPR)? Does it comply with any other standards, such as the US Equal Employment Opportunity Act? Does the dataset contain information that might be considered sensitive or confidential? (e.g., personally identifying information)*\nTo the authors' knowledge, this dataset complies with the laws mentioned above.\n \n- *Does the dataset contain information that might be considered inappropriate or offensive?*\nNo, this dataset does not contain any information like this.\n\n- *Any other comments?*" ]
[ "TAGS\n#size_categories-n<1K #language-English #license-mit #music #region-us \n", "# 1650114) helped to financially support the creation of this dataset by helping financially support the creator through their graduate program.\n\n- *Any other comments?* \n\nDataset Composition\n- *What are the instances? (that is, examples; e.g., documents, images, people, countries) Are there multiple types of instances? (e.g., movies, users, ratings; people, interactions between them; nodes, edges)*\nThe instances themselves are annotated of individual mixes either from Logic Pro, Pro Tools, or Reaper, depending on the artist who mixed them.\n\n- *Are relationships between instances made explicit in the data (e.g., social network links, user/movie ratings, etc.)?*\nEach mix is unique to the other, and there exists no evident relationship between them.\n\n- *How many instances of each type are there?*\nThere will be 114 mixes once this dataset is finalized.\n\n- *What data does each instance consist of? \"Raw\" data (e.g., unprocessed text or images)? Features/attributes? Is there a label/target associated with instances? If the instances are related to people, are subpopulations identified (e.g., by age, gender, etc.) and what is their distribution?*\nEach instance of a mix contains the following: Mix Name, Song Name, Artist Name, Genre, Tracks, Track Name, Track Type, Track Audio Path, Channel Mode, Parameters, Gain, Pan, (Etc)\n\n- *Is everything included or does the data rely on external resources? (e.g., websites, tweets, datasets) If external resources, a) are there guarantees that they will exist, and remain constant, over time; b) is there an official archival version. Are there licenses, fees or rights associated with any of the data?*\nThe audio that is associated with each mix is an external resource, as those audio files are original to their source. The original audio sources are from The Mixing Secrets, Weathervane, or The Open Multitrack Testbed.\n\n- *Are there recommended data splits or evaluation measures? (e.g., training, development, testing; accuracy/AUC)*\nThere are no data splits recommended for this. However, suppose no listening evaluation is available for that current mix. In that case, we recommend leaving out that mix if you plan on using those comments for the semantic representation of the mix. All of the mixes that were annotated from Mike Senior's The Mixing Secret projects for sound on sound do not contain any listening evaluation.\n\n- *What experiments were initially run on this dataset? Have a summary of those results and, if available, provide the link to a paper with more information here.*\nNo experiments have been run on this dataset as of yet.\n\n- Any other comments? \n\nData Collection Process \n- *How was the data collected? (e.g., hardware apparatus/sensor, manual human curation, software program, software interface/API; how were these constructs/measures/methods validated?)*\nThe data was collected manually by annotating parameter values for each track in the mix. The mix projects were provided as Logic Pro, Pro Tools, or Reaper files. Each project was opened in their respective software and the author went through each track and annotated these parameters manually. A tool was created to help assemble this dataset for parameter values that plugin manufacturers obscured. This tool estimated the value of each parameter based on the visual representation that was provided in the plugin.\n\n- *Who was involved in the data collection process? 
(e.g., students, crowdworkers) How were they compensated? (e.g., how much were crowdworkers paid?)*\nThe author of this dataset collected the data and is a graduate student at the University of Utah.\n\n- *Over what time-frame was the data collected? Does the collection time-frame match the creation time-frame?*\nThis dataset has been collected from September through November of 2023. The creation time frame overlaps the collection time frame as the main structure for the dataset was created, and mixes are added iteratively.\n\n- *How was the data associated with each instance acquired? Was the data directly observable (e.g., raw text, movie ratings), reported by subjects (e.g., survey responses), or indirectly inferred/derived from other data (e.g., part of speech tags; model-based guesses for age or language)? If the latter two, were they validated/verified and if so how?*\nThe data were directly associated with each instance. The parameter values are visually represented in each session file for the mixes.\n\n- *Does the dataset contain all possible instances? Or is it, for instance, a sample (not necessarily random) from a larger set of instances?*\nThe dataset contains all possible instances that were given by The Mix Evaluation Dataset, negating the copyrighted songs that were used in the listening evaluation.\n\n- *If the dataset is a sample, then what is the population? What was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)? Is the sample representative of the larger set (e.g., geographic coverage)? If not, why not (e.g., to cover a more diverse range of instances)? How does this affect possible uses?*\nThis dataset does not represent a sample of a larger population and thus, a sample size is not appropriate in this case.\n\n- *Is there information missing from the dataset and why? (this does not include intentionally dropped instances; it might include, e.g., redacted text, withheld documents) Is this data missing because it was unavailable?*\nNot all of the parameter values for every plugin used were documented. Occasionally a mix would include a saturator or a multiband compressor. Due to the low occurrence of these plugins, these were omitted for the annotating process.\n\n- *Are there any known errors, sources of noise, or redundancies in the data?*\nTo the author's knowledge, there are no errors or sources of noise within this dataset.\n\n- *Any other comments?*\n\nData Preprocessing \n- *What preprocessing/cleaning was done? (e.g., discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values, etc.)*\nThe data preprocessing happened during the data collection stage for this dataset. Some of the data values were not available from the plugins that were used in a DAW session file. To help estimate the values on each of the parameters for that respective plugin, a tool was created and used by this author. If there wasn't a value for the parameter, the value was omitted from the data collection.\n\n- *Was the \"raw\" data saved in addition to the preprocessed/cleaned data? (e.g., to support unanticipated future uses)*\nThe raw data is still saved in the project files but was not annotated and, therefore, is not contained in this dataset. 
For the raw files of each mix, the reader should explore The Mix Evaluation dataset for these values.\n\n- *Is the preprocessing software available?*\nThe tool that was used to help the author annotate some of the parameter values is available for download here\n\n- *Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?*\nThe authors of this dataset intended to create an ethical source repository for AI music researchers to use for music mixing. We believe by using The Mix Evaluation dataset along with publically available music mixing projects, we have achieved our goal. Although this dataset is considerably smaller than what is required for most model architectures utilized in generative AI applications, we hope this is a positive addition to the field.\n\n- *Any other comments?*\n\nDataset Distribution \n- *How is the dataset distributed? (e.g., website, API, etc.; does the data have a DOI; is it archived redundantly?)*\nThis dataset is distributed via HuggingFace and will continue to be hosted there for the foreseeable future. There are no current plans to create an API, although a website for the dataset has been mentioned. The data is currently being archived redundantly through the University of Utah's Box account. Should HuggingFace go down or remove the dataset, the data themselves will remain at the University of Utah and will be uploaded to a separate website.\n\n- *When will the dataset be released/first distributed? (Is there a canonical paper/reference for this dataset?)*\nThe dataset, in its entirety, will be released on December 5th, 2023.\n\n- *What license (if any) is it distributed under? Are there any copyrights on the data?*\nThe license will be distributed via the MIT license. There are no copyrights on this data.\n\n- *Are there any fees or access/export restrictions?*\nThere are no fees or access/export restrictions for this dataset.\n\n- *Any other comments?*\n\nDataset Maintenance \n- *Who is supporting/hosting/maintaining the dataset? How does one contact the owner/curator/manager of the dataset (e.g. email address, or other contact info)?*\nHuggingFace is currently hosting the dataset and Michael Clemens (email: michael.clemens at URL) is maintaining the dataset.\n\n- *Will the dataset be updated? How often and by whom? How will updates/revisions be documented and communicated (e.g., mailing list, GitHub)? Is there an erratum?*\nThe release of this dataset is set to be *December 5th, 2023*. Updates and revisions will be documented through the repository through HuggingFace. There is currently no erratum, but should that be the case, this will be documented here as they come about.\n\n- *If the dataset becomes obsolete how will this be communicated?*\nShould the dataset no longer be valid, this will be communicated through the ReadMe right here on HF.\n \n- *Is there a repository to link to any/all papers/systems that use this dataset?*\nThere is no repo or link to any paper/systems that use the dataset. Should this dataset be used in the future for papers or system design, there will be a link to these works on this ReadMe, or a website will be created and linked here for the collection of works.\n\n- *If others want to extend/augment/build on this dataset, is there a mechanism for them to do so? If so, is there a process for tracking/assessing the quality of those contributions. 
What is the process for communicating/distributing these contributions to users?*\nThis dataset is an extension of The Mix Evaluation Dataset by Brecht De Man et al., and users are free to extend/augment/build on this dataset. There is no trackable way currently of assessing these contributions.\n- \n- *Any other comments?*\n\nLegal & Ethical Considerations\n- *If the dataset relates to people (e.g., their attributes) or was generated by people, were they informed about the data collection? (e.g., datasets that collect writing, photos, interactions, transactions, etc.)*\nAs this was a derivative of another work that performed the main data collection, the original music producers who mixed these tracks were not informed of the creation of this dataset.\n\n- *If it relates to other ethically protected subjects, have appropriate obligations been met? (e.g., medical data might include information collected from animals)*\nN/A\n\n- *If it relates to people, were there any ethical review applications/reviews/approvals? (e.g. Institutional Review Board applications)*\nAs this is an extension of the main dataset by Brecht De Man et al. and the data collection had already been conducted, an IRB was not included in this creation of this dataset. The data themselves are not related to the music producers but instead remain as an artifact of their work. Due to the nature of these data, an IRB was not needed.\n \n- *If it relates to people, were they told what the dataset would be used for and did they consent? What community norms exist for data collected from human communications? If consent was obtained, how? Were the people provided with any mechanism to revoke their consent in the future or for certain uses?*\nN/A\n\n- *If it relates to people, could this dataset expose people to harm or legal action? (e.g., financial social or otherwise) What was done to mitigate or reduce the potential for harm?*\nThe main initiative of this work was to create a dataset that was ethically sourced for parameter recommendations in the music-mixing process. With this, all of the data found here has been gathered from publically avaiable data from artists. Therefore no copyright or fair use infringement exists.\n \n- *If it relates to people, does it unfairly advantage or disadvantage a particular social group? In what ways? How was this mitigated? If it relates to people, were they provided with privacy guarantees? If so, what guarantees and how are these ensured?*\nN/A\n\n- *Does the dataset comply with the EU General Data Protection Regulation (GDPR)? Does it comply with any other standards, such as the US Equal Employment Opportunity Act? Does the dataset contain information that might be considered sensitive or confidential? (e.g., personally identifying information)*\nTo the authors' knowledge, this dataset complies with the laws mentioned above.\n \n- *Does the dataset contain information that might be considered inappropriate or offensive?*\nNo, this dataset does not contain any information like this.\n\n- *Any other comments?*" ]
[ 27, 3245 ]
[ "passage: TAGS\n#size_categories-n<1K #language-English #license-mit #music #region-us \n" ]
397a0de0deb1f5897f09d3fea7e38f3e559918da
# Dataset Card for "ha-en_RL-grow1_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pranjali97/ha-en_RL-grow1_train
[ "region:us" ]
2023-11-04T03:29:53+00:00
{"dataset_info": {"features": [{"name": "src", "dtype": "string"}, {"name": "ref", "dtype": "string"}, {"name": "mt", "dtype": "string"}, {"name": "score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 13578997, "num_examples": 29454}], "download_size": 3191264, "dataset_size": 13578997}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T03:29:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ha-en_RL-grow1_train" More Information needed
[ "# Dataset Card for \"ha-en_RL-grow1_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ha-en_RL-grow1_train\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ha-en_RL-grow1_train\"\n\nMore Information needed" ]
79286aa780a187f6d3e88be43c155a53759eac5f
# Dataset Card for "ha-en_RL-grow1_valid" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pranjali97/ha-en_RL-grow1_valid
[ "region:us" ]
2023-11-04T03:31:01+00:00
{"dataset_info": {"features": [{"name": "src", "dtype": "string"}, {"name": "ref", "dtype": "string"}, {"name": "mt", "dtype": "string"}, {"name": "score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1553276, "num_examples": 3339}], "download_size": 369871, "dataset_size": 1553276}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T03:31:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ha-en_RL-grow1_valid" More Information needed
[ "# Dataset Card for \"ha-en_RL-grow1_valid\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ha-en_RL-grow1_valid\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ha-en_RL-grow1_valid\"\n\nMore Information needed" ]
3f015be43ebbe8ba76aa1c6c9bc69ac0e152b01a
# Dataset Card for "ko_hh-rlhf-20k_filtered" Synatra-Translation 모델로 번역된 20k rlhf셋입니다. 번역퀄이 뛰어나진 않습니다. 추가 대화문 등의 데이터 학습이 필요해보입니다. ## 베이스 데이터셋 [Anthropic/hh-rlhf](https://huggingface.co/datasets/Anthropic/hh-rlhf)
maywell/ko_hh-rlhf-20k_filtered
[ "region:us" ]
2023-11-04T03:38:26+00:00
{"dataset_info": {"features": [{"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 30828302, "num_examples": 19363}], "download_size": 15034439, "dataset_size": 30828302}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T18:45:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ko_hh-rlhf-20k_filtered" Synatra-Translation 모델로 번역된 20k rlhf셋입니다. 번역퀄이 뛰어나진 않습니다. 추가 대화문 등의 데이터 학습이 필요해보입니다. ## 베이스 데이터셋 Anthropic/hh-rlhf
[ "# Dataset Card for \"ko_hh-rlhf-20k_filtered\"\n\nSynatra-Translation 모델로 번역된 20k rlhf셋입니다. 번역퀄이 뛰어나진 않습니다. 추가 대화문 등의 데이터 학습이 필요해보입니다.", "## 베이스 데이터셋\nAnthropic/hh-rlhf" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ko_hh-rlhf-20k_filtered\"\n\nSynatra-Translation 모델로 번역된 20k rlhf셋입니다. 번역퀄이 뛰어나진 않습니다. 추가 대화문 등의 데이터 학습이 필요해보입니다.", "## 베이스 데이터셋\nAnthropic/hh-rlhf" ]
[ 6, 55, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ko_hh-rlhf-20k_filtered\"\n\nSynatra-Translation 모델로 번역된 20k rlhf셋입니다. 번역퀄이 뛰어나진 않습니다. 추가 대화문 등의 데이터 학습이 필요해보입니다.## 베이스 데이터셋\nAnthropic/hh-rlhf" ]
54a4a26a0a834774e78e0af8c8509420a03813c6
# Dataset Card for "ncbi_genbank_full_20231104" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_full_20231104
[ "region:us" ]
2023-11-04T03:51:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 30201546596, "num_examples": 3245142}], "download_size": 13521263666, "dataset_size": 30201546596}}
2023-11-04T19:36:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_full_20231104" More Information needed
[ "# Dataset Card for \"ncbi_genbank_full_20231104\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_full_20231104\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_full_20231104\"\n\nMore Information needed" ]
cd54b0b03246f45ba7c3afd13c4c0e83c3ce64c7
# Dataset Card for "processed_bert_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nicolastzj/processed_bert_dataset
[ "region:us" ]
2023-11-04T04:47:32+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 8473150800.0, "num_examples": 2353653}], "download_size": 2275859230, "dataset_size": 8473150800.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T05:08:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "processed_bert_dataset" More Information needed
[ "# Dataset Card for \"processed_bert_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"processed_bert_dataset\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"processed_bert_dataset\"\n\nMore Information needed" ]
ea75130c262836b2577461c6d43862834398ea69
# Dataset Card for "OpenOrca-trans-gpt4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SUSTech/OpenOrca-trans-gpt4
[ "region:us" ]
2023-11-04T05:51:46+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "en", "dtype": "string"}, {"name": "zh", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1655037, "num_examples": 621}], "download_size": 766655, "dataset_size": 1655037}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T05:51:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OpenOrca-trans-gpt4" More Information needed
[ "# Dataset Card for \"OpenOrca-trans-gpt4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OpenOrca-trans-gpt4\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"OpenOrca-trans-gpt4\"\n\nMore Information needed" ]
296c9722709a5e8f1354012be7513ef3e1908a49
![logo](https://huggingface.co/squarelike/Gugugo-koen-7B-V1.1/resolve/main/logo.png) # **OpenOrca Korean Translation Dataset** The [OpenOrca](https://huggingface.co/datasets/Open-Orca/OpenOrca) dataset is being translated using [Gugugo-koen-7B-V1.1](https://huggingface.co/squarelike/Gugugo-koen-7B-V1.1). Please see below for the translation progress. ## Progress - Of the roughly 1 million GPT-4 completions, about 640k have been translated - Of the roughly 3.5 million GPT-3.5 completions, about 1.59M have been translated Crediting the source when using this dataset is a great encouragement to its creator. # Original dataset card: OpenOrca ## Table of Contents - [Dataset Summary](#dataset-summary) - [Dataset Attribution](#dataset-attribution) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Dataset Use](#dataset-use) - [Use Cases](#use-cases) - [Usage Caveats](#usage-caveats) - [Getting Started](#getting-started) <p><h1>🐋 The OpenOrca Dataset! 🐋</h1></p> ![OpenOrca Logo](https://huggingface.co/datasets/Open-Orca/OpenOrca/resolve/main/OpenOrcaLogo.png "OpenOrca Logo") <a name="dataset-announcement"></a> We are thrilled to announce the release of the OpenOrca dataset! This rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the [Orca paper](https://arxiv.org/abs/2306.02707). It has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers! # Official Models ## Mistral-7B-OpenOrca Our [latest model](https://huggingface.co/spaces/Open-Orca/Mistral-7B-OpenOrca), the first 7B to score better overall than all previous models below 30B. 98% of Llama2-70b-chat's performance, in a completely open 7B! ## OpenOrca-Platypus2-13B Our [third model](https://huggingface.co/Open-Orca/OpenOrca-Platypus2-13B), the first 13B model to score higher than LLaMA1-65B on the HuggingFace Leaderboard! Released in partnership with Platypus. ## LlongOrca 7B & 13B * Our [first 7B release](https://huggingface.co/Open-Orca/LlongOrca-7B-16k), trained on top of LLongMA2 to achieve 16,000 tokens context. #1 long context 7B model at release time, with >99% of the overall #1 model's performance. * [LlongOrca-13B-16k](https://huggingface.co/Open-Orca/LlongOrca-13B-16k), trained on top of LLongMA2. #1 long context 13B model at release time, with >97% of the overall #1 model's performance. ## OpenOrcaxOpenChat-Preview2-13B Our [second model](https://huggingface.co/Open-Orca/OpenOrcaxOpenChat-Preview2-13B), highlighting that we've surpassed the performance reported in the Orca paper. Was #1 at release time, now surpassed by our own OpenOrca-Platypus2-13B. Released in partnership with OpenChat. ## OpenOrca-Preview1-13B [OpenOrca-Preview1-13B](https://huggingface.co/Open-Orca/OpenOrca-Preview1-13B) This model was trained in less than a day, for <$200, with <10% of our data. At release, it beat the current state of the art models on BigBench-Hard and AGIEval. Achieves ~60% of the improvements reported in the Orca paper. <a name="dataset-summary"></a> # Dataset Summary The OpenOrca dataset is a collection of augmented [FLAN Collection data](https://arxiv.org/abs/2301.13688). Currently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions.
It is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope. The data is primarily used for training and evaluation in the field of natural language processing. <a name="dataset-attribution"></a> # Dataset Attribution We would like to give special recognition to the following contributors for their significant efforts and dedication: Teknium, WingLian/Caseus, Eric Hartford, NanoBit, Pankaj, Winddude, Rohan. http://AlignmentLab.ai: Autometa, Entropi, AtlasUnified, NeverendingToast, NanoBit, WingLian/Caseus. Also of course, as always, TheBloke, for being the backbone of the whole community. Many thanks to NanoBit and Caseus, makers of [Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl), for lending us their expertise on the platform that developed and trained manticore, minotaur, and many others! We are welcoming sponsors or collaborators to help us build these models to the scale they deserve. Please reach out via our socials: http://Alignmentlab.ai https://discord.gg/n9hXaBPWxx Want to visualize our full dataset? Check out our [Nomic Atlas Map](https://atlas.nomic.ai/map/c1b88b47-2d9b-47e0-9002-b80766792582/2560fd25-52fe-42f1-a58f-ff5eccc890d2). [<img src="https://huggingface.co/Open-Orca/OpenOrca-Preview1-13B/resolve/main/OpenOrca%20Nomic%20Atlas.png" alt="Atlas Nomic Dataset Map" width="400" height="400" />](https://atlas.nomic.ai/map/c1b88b47-2d9b-47e0-9002-b80766792582/2560fd25-52fe-42f1-a58f-ff5eccc890d2) <a name="supported-tasks-and-leaderboards"></a> # Supported Tasks and Leaderboards This dataset supports a range of tasks including language modeling, text generation, and text augmentation. It has been instrumental in the generation of multiple high-performing model checkpoints which have exhibited exceptional performance in our unit testing. Further information on leaderboards will be updated as they become available. <a name="languages"></a> # Languages The language of the data is primarily English. <a name="dataset-structure"></a> # Dataset Structure <a name="data-instances"></a> ## Data Instances A data instance in this dataset represents entries from the FLAN collection which have been augmented by submitting the listed question to either GPT-4 or GPT-3.5. The response is then entered into the response field. <a name="data-fields"></a> ## Data Fields The fields are: 1) 'id', a unique numbered identifier which includes one of 'niv', 't0', 'cot', or 'flan' to represent which source FLAN Collection submix the 'question' is sourced from. 2) 'system_prompt', representing the System Prompt presented to the GPT-3.5 or GPT-4 API for the datapoint 3) 'question', representing a question entry as provided by the FLAN Collection 4) 'response', a response to that question received from a query to either GPT-3.5 or GPT-4. <a name="data-splits"></a> ## Data Splits The data is unsplit. <a name="dataset-creation"></a> # Dataset Creation <a name="curation-rationale"></a> ## Curation Rationale The dataset was created to provide a source of augmented text data for researchers and developers. The datapoints are intended primarily to provide an enhancement of the core FLAN Collection data which relies upon the detailed step by step reasoning capabilities of GPT-3.5 and GPT-4.
This "reasoning trace" augmentation has demonstrated exceptional results, allowing a LLaMA-13B model trained with this data to rival or beat GPT-3.5 on broad sets of hard reasoning tasks which all models below 100B parameters had previously performed dramatically worse on. <a name="source-data"></a> ## Source Data The data is generated using techniques in alignment with the distributions outlined in the Orca paper, except as noted below: 1) There is not enough CoT data in the FLAN Collection to generate 150K zero-shot entries, as the paper purports to use. We suspect this portion was either undocumented or misrepresented. We have used the ~75K points available. 2) We used the pre-generated FLAN Collection datasets hosted on HuggingFace under conceptofmind, e.g. [conceptofmind/flan2021](https://huggingface.co/datasets/conceptofmind/flan2021_submix_original). These are referenced by the [official FLAN Collection repo](https://github.com/google-research/FLAN/tree/main/flan/v2) as the preferred data source. However, these are a subset of the full FLAN Collection data, and have less than the required entries for the flan2021 and t0 submixes, by ~1.25M and 200k respectively. Combined, this gave us ~1.5M fewer datapoints than in the original Orca paper. Completing the set is an ongoing work. <a name="dataset-use"></a> # Dataset Use <a name="use-cases"></a> ## Use Cases The dataset can be used for tasks related to language understanding, natural language processing, machine learning model training, and model performance evaluation. <a name="usage-caveats"></a> ## Usage Caveats Given that this is a work-in-progress dataset, it is recommended to regularly check for updates and improvements. Further, the data should be used in accordance with the guidelines and recommendations outlined in the Orca paper. <a name="getting-started"></a> ## Getting Started This dataset is organized such that it can be naively loaded via Hugging Face datasets library. We recommend using streaming due to the large size of the files. Regular updates and data generation progress can be monitored through the OpenOrca repository on Hugging Face. # Citation ```bibtex @misc{OpenOrca, title = {OpenOrca: An Open Dataset of GPT Augmented FLAN Reasoning Traces}, author = {Wing Lian and Bleys Goodson and Eugene Pentland and Austin Cook and Chanvichet Vong and "Teknium"}, year = {2023}, publisher = {HuggingFace}, journal = {HuggingFace repository}, howpublished = {\url{https://https://huggingface.co/Open-Orca/OpenOrca}}, } ``` ```bibtex @misc{mukherjee2023orca, title={Orca: Progressive Learning from Complex Explanation Traces of GPT-4}, author={Subhabrata Mukherjee and Arindam Mitra and Ganesh Jawahar and Sahaj Agarwal and Hamid Palangi and Ahmed Awadallah}, year={2023}, eprint={2306.02707}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ```bibtex @misc{longpre2023flan, title={The Flan Collection: Designing Data and Methods for Effective Instruction Tuning}, author={Shayne Longpre and Le Hou and Tu Vu and Albert Webson and Hyung Won Chung and Yi Tay and Denny Zhou and Quoc V. 
Le and Barret Zoph and Jason Wei and Adam Roberts}, year={2023}, eprint={2301.13688}, archivePrefix={arXiv}, primaryClass={cs.AI} } ``` ```bibtex @misc{touvron2023llama2, title={Llama 2: Open Foundation and Fine-Tuned Chat Models}, author={Hugo Touvron and Louis Martin and Kevin Stone and Peter Albert and Amjad Almahairi and Yasmine Babaei and Nikolay Bashlykov and Soumya Batra and Prajjwal Bhargava and Shruti Bhosale and Dan Bikel and Lukas Blecher and Cristian Canton Ferrer and Moya Chen and Guillem Cucurull and David Esiobu and Jude Fernandes and Jeremy Fu and Wenyin Fu and Brian Fuller and Cynthia Gao and Vedanuj Goswami and Naman Goyal and Anthony Hartshorn and Saghar Hosseini and Rui Hou and Hakan Inan and Marcin Kardas and Viktor Kerkez and Madian Khabsa and Isabel Kloumann and Artem Korenev and Punit Singh Koura and Marie-Anne Lachaux and Thibaut Lavril and Jenya Lee and Diana Liskovich and Yinghai Lu and Yuning Mao and Xavier Martinet and Todor Mihaylov and Pushkar Mishra and Igor Molybog and Yixin Nie and Andrew Poulton and Jeremy Reizenstein and Rashi Rungta and Kalyan Saladi and Alan Schelten and Ruan Silva and Eric Michael Smith and Ranjan Subramanian and Xiaoqing Ellen Tan and Binh Tang and Ross Taylor and Adina Williams and Jian Xiang Kuan and Puxin Xu and Zheng Yan and Iliyan Zarov and Yuchen Zhang and Angela Fan and Melanie Kambadur and Sharan Narang and Aurelien Rodriguez and Robert Stojnic and Sergey Edunov and Thomas Scialom}, year={2023}, eprint={2307.09288}, archivePrefix={arXiv}, primaryClass={cs.CL} } @software{touvron2023llama, title={LLaMA: Open and Efficient Foundation Language Models}, author={Touvron, Hugo and Lavril, Thibaut and Izacard, Gautier and Martinet, Xavier and Lachaux, Marie-Anne and Lacroix, Timoth{\'e}e and Rozi{\`e}re, Baptiste and Goyal, Naman and Hambro, Eric and Azhar, Faisal and Rodriguez, Aurelien and Joulin, Armand and Grave, Edouard and Lample, Guillaume}, journal={arXiv preprint arXiv:2302.13971}, year={2023} } ```
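The Getting Started section above recommends streaming because of file size. A minimal sketch, assuming the standard `datasets` API and a `train` split (the card does not enumerate its splits):

```python
from itertools import islice

from datasets import load_dataset

# Stream rather than download, as the card itself recommends for
# large files; the "train" split name is an assumption.
stream = load_dataset(
    "squarelike/OpenOrca-gugugo-ko", split="train", streaming=True
)

for row in islice(stream, 3):
    print(row)
```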
squarelike/OpenOrca-gugugo-ko
[ "task_categories:conversational", "task_categories:text-classification", "task_categories:token-classification", "task_categories:table-question-answering", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:summarization", "task_categories:feature-extraction", "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:10M<n<100M", "language:ko", "license:mit", "arxiv:2306.02707", "arxiv:2301.13688", "region:us" ]
2023-11-04T05:57:56+00:00
{"language": ["ko"], "license": "mit", "size_categories": ["10M<n<100M"], "task_categories": ["conversational", "text-classification", "token-classification", "table-question-answering", "question-answering", "zero-shot-classification", "summarization", "feature-extraction", "text-generation", "text2text-generation"], "pretty_name": "OpenOrca"}
2023-11-12T22:41:06+00:00
[ "2306.02707", "2301.13688" ]
[ "ko" ]
TAGS #task_categories-conversational #task_categories-text-classification #task_categories-token-classification #task_categories-table-question-answering #task_categories-question-answering #task_categories-zero-shot-classification #task_categories-summarization #task_categories-feature-extraction #task_categories-text-generation #task_categories-text2text-generation #size_categories-10M<n<100M #language-Korean #license-mit #arxiv-2306.02707 #arxiv-2301.13688 #region-us
!logo # OpenOrca Korean Translation Dataset The OpenOrca dataset is being translated using Gugugo-koen-7B-V1.1. Please see below for the translation progress. ## Progress - Of the roughly 1 million GPT-4 completions, about 640k have been translated - Of the roughly 3.5 million GPT-3.5 completions, about 1.59M have been translated Crediting the source when using this dataset is a great encouragement to its creator. # Original dataset card: OpenOrca ## Table of Contents - Dataset Summary - Dataset Attribution - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Dataset Use - Use Cases - Usage Caveats - Getting Started <p><h1> The OpenOrca Dataset! </h1></p> !OpenOrca Logo <a name="dataset-announcement"></a> We are thrilled to announce the release of the OpenOrca dataset! This rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper. It has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers! # Official Models ## Mistral-7B-OpenOrca Our latest model, the first 7B to score better overall than all previous models below 30B. 98% of Llama2-70b-chat's performance, in a completely open 7B! ## OpenOrca-Platypus2-13B Our third model, the first 13B model to score higher than LLaMA1-65B on the HuggingFace Leaderboard! Released in partnership with Platypus. ## LlongOrca 7B & 13B * Our first 7B release, trained on top of LLongMA2 to achieve 16,000 tokens context. #1 long context 7B model at release time, with >99% of the overall #1 model's performance. * LlongOrca-13B-16k, trained on top of LLongMA2. #1 long context 13B model at release time, with >97% of the overall #1 model's performance. ## OpenOrcaxOpenChat-Preview2-13B Our second model, highlighting that we've surpassed the performance reported in the Orca paper. Was #1 at release time, now surpassed by our own OpenOrca-Platypus2-13B. Released in partnership with OpenChat. ## OpenOrca-Preview1-13B OpenOrca-Preview1-13B This model was trained in less than a day, for <$200, with <10% of our data. At release, it beat the current state of the art models on BigBench-Hard and AGIEval. Achieves ~60% of the improvements reported in the Orca paper. <a name="dataset-summary"></a> # Dataset Summary The OpenOrca dataset is a collection of augmented FLAN Collection data. Currently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions. It is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope. The data is primarily used for training and evaluation in the field of natural language processing. <a name="dataset-attribution"></a> # Dataset Attribution We would like to give special recognition to the following contributors for their significant efforts and dedication: Teknium, WingLian/Caseus, Eric Hartford, NanoBit, Pankaj, Winddude, Rohan. URL: Autometa, Entropi, AtlasUnified, NeverendingToast, NanoBit, WingLian/Caseus. Also of course, as always, TheBloke, for being the backbone of the whole community. Many thanks to NanoBit and Caseus, makers of Axolotl, for lending us their expertise on the platform that developed and trained manticore, minotaur, and many others! We are welcoming sponsors or collaborators to help us build these models to the scale they deserve. Please reach out via our socials: URL URL Want to visualize our full dataset? Check out our Nomic Atlas Map.
<img src="URL alt="Atlas Nomic Dataset Map" width="400" height="400" /> <a name="supported-tasks-and-leaderboards"></a> # Supported Tasks and Leaderboards This dataset supports a range of tasks including language modeling, text generation, and text augmentation. It has been instrumental in the generation of multiple high-performing model checkpoints which have exhibited exceptional performance in our unit testing. Further information on leaderboards will be updated as they become available. <a name="languages"></a> # Languages The language of the data is primarily English. <a name="dataset-structure"></a> # Dataset Structure <a name="data-instances"></a> ## Data Instances A data instance in this dataset represents entries from the FLAN collection which have been augmented by submitting the listed question to either GPT-4 or GPT-3.5. The response is then entered into the response field. <a name="data-fields"></a> ## Data Fields The fields are: 1) 'id', a unique numbered identifier which includes one of 'niv', 't0', 'cot', or 'flan' to represent which source FLAN Collection submix the 'question' is sourced from. 2) 'system_prompt', representing the System Prompt presented to the GPT-3.5 or GPT-4 API for the datapoint 3) 'question', representing a question entry as provided by the FLAN Collection 4) 'response', a response to that question received from a query to either GPT-3.5 or GPT-4. <a name="data-splits"></a> ## Data Splits The data is unsplit. <a name="dataset-creation"></a> # Dataset Creation <a name="curation-rationale"></a> ## Curation Rationale The dataset was created to provide a source of augmented text data for researchers and developers. The datapoints are intended primarily to provide an enhancement of the core FLAN Collection data which relies upon the detailed step by step reasoning capabilities of GPT-3.5 and GPT-4. This "reasoning trace" augmentation has demonstrated exceptional results, allowing a LLaMA-13B model trained with this data to rival or beat GPT-3.5 on broad sets of hard reasoning tasks which all models below 100B parameters had previously performed dramatically worse on. <a name="source-data"></a> ## Source Data The data is generated using techniques in alignment with the distributions outlined in the Orca paper, except as noted below: 1) There is not enough CoT data in the FLAN Collection to generate 150K zero-shot entries, as the paper purports to use. We suspect this portion was either undocumented or misrepresented. We have used the ~75K points available. 2) We used the pre-generated FLAN Collection datasets hosted on HuggingFace under conceptofmind, e.g. conceptofmind/flan2021. These are referenced by the official FLAN Collection repo as the preferred data source. However, these are a subset of the full FLAN Collection data, and have less than the required entries for the flan2021 and t0 submixes, by ~1.25M and 200k respectively. Combined, this gave us ~1.5M fewer datapoints than in the original Orca paper. Completing the set is an ongoing work. <a name="dataset-use"></a> # Dataset Use <a name="use-cases"></a> ## Use Cases The dataset can be used for tasks related to language understanding, natural language processing, machine learning model training, and model performance evaluation. <a name="usage-caveats"></a> ## Usage Caveats Given that this is a work-in-progress dataset, it is recommended to regularly check for updates and improvements. Further, the data should be used in accordance with the guidelines and recommendations outlined in the Orca paper. 
<a name="getting-started"></a> ## Getting Started This dataset is organized such that it can be naively loaded via Hugging Face datasets library. We recommend using streaming due to the large size of the files. Regular updates and data generation progress can be monitored through the OpenOrca repository on Hugging Face.
[ "# OpenOrca 한국어 번역 데이터셋\n\nGugugo-koen-7B-V1.1을 이용하여 OpenOrca데이터셋을 번역하고 있습니다.\n번역 진행상황은 아래를 참고해 주십시오.", "## 진행상황\n- GPT4 생성물 약 100만 개 중 약 64만 개 번역완료\n- GPT3.5 생성물 약 350만 개 중 약 159만 개 번역완료\n\n데이터셋 사용 후 출처표기는 제작자에게 큰 힘이 됩니다.", "# Original dataset card: OpenOrca", "## Table of Contents\n- Dataset Summary\n- Dataset Attribution\n- Supported Tasks and Leaderboards\n- Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n- Dataset Use\n - Use Cases\n - Usage Caveats\n - Getting Started\n\n\n<p><h1> The OpenOrca Dataset! </h1></p>\n\n!OpenOrca Logo\n\n<a name=\"dataset-announcement\"></a>\n\nWe are thrilled to announce the release of the OpenOrca dataset!\nThis rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper.\nIt has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers!", "# Official Models", "## Mistral-7B-OpenOrca\n\nOur latest model, the first 7B to score better overall than all previous models below 30B.\n98% of Llama2-70b-chat's performance, in a completely open 7B!", "## OpenOrca-Platypus2-13B\n\nOur third model, the first 13B model to score higher than LLaMA1-65B on the HuggingFace Leaderboard!\nReleased in partnership with Platypus.", "## LlongOrca 7B & 13B\n\n* Our first 7B release, trained on top of LLongMA2 to achieve 16,000 tokens context. #1 long context 7B model at release time, with >99% of the overall #1 model's performance.\n* LlongOrca-13B-16k, trained on top of LLongMA2. #1 long context 13B model at release time, with >97% of the overall #1 model's performance.", "## OpenOrcaxOpenChat-Preview2-13B\n\nOur second model, highlighting that we've surpassed the performance reported in the Orca paper.\nWas #1 at release time, now surpassed by our own OpenOrca-Platypus2-13B.\nReleased in partnership with OpenChat.", "## OpenOrca-Preview1-13B\n\nOpenOrca-Preview1-13B\nThis model was trained in less than a day, for <$200, with <10% of our data.\nAt release, it beat the current state of the art models on BigBench-Hard and AGIEval. Achieves ~60% of the improvements reported in the Orca paper.\n\n<a name=\"dataset-summary\"></a>", "# Dataset Summary\n\nThe OpenOrca dataset is a collection of augmented FLAN Collection data.\nCurrently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions.\nIt is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope.\nThe data is primarily used for training and evaluation in the field of natural language processing.\n\n<a name=\"dataset-attribution\"></a>", "# Dataset Attribution\n\nWe would like to give special recognition to the following contributors for their significant efforts and dedication:\n \n Teknium \n WingLian/Caseus\n Eric Hartford\n NanoBit\n Pankaj\n Winddude\n Rohan\n URL:\n Autometa\n Entropi\n AtlasUnified\n NeverendingToast\n NanoBit\n WingLian/Caseus\nAlso of course, as always, TheBloke, for being the backbone of the whole community.\n\nMany thanks to NanoBit and Caseus, makers of Axolotl, for lending us their expertise on the platform that developed and trained manticore, minotaur, and many others! \n\nWe are welcoming sponsors or collaborators to help us build these models to the scale they deserve. 
Please reach out via our socials:\nURL URL\n\nWant to visualize our full dataset? Check out our Nomic Atlas Map.\n <img src=\"URL alt=\"Atlas Nomic Dataset Map\" width=\"400\" height=\"400\" />\n\n\n<a name=\"supported-tasks-and-leaderboards\"></a>", "# Supported Tasks and Leaderboards\n\nThis dataset supports a range of tasks including language modeling, text generation, and text augmentation.\nIt has been instrumental in the generation of multiple high-performing model checkpoints which have exhibited exceptional performance in our unit testing.\nFurther information on leaderboards will be updated as they become available.\n\n<a name=\"languages\"></a>", "# Languages\n\nThe language of the data is primarily English.\n\n<a name=\"dataset-structure\"></a>", "# Dataset Structure\n\n<a name=\"data-instances\"></a>", "## Data Instances\n\nA data instance in this dataset represents entries from the FLAN collection which have been augmented by submitting the listed question to either GPT-4 or GPT-3.5.\nThe response is then entered into the response field.\n\n<a name=\"data-fields\"></a>", "## Data Fields\n\nThe fields are:\n1) 'id', a unique numbered identifier which includes one of 'niv', 't0', 'cot', or 'flan' to represent which source FLAN Collection submix the 'question' is sourced from.\n2) 'system_prompt', representing the System Prompt presented to the GPT-3.5 or GPT-4 API for the datapoint\n3) 'question', representing a question entry as provided by the FLAN Collection\n4) 'response', a response to that question received from a query to either GPT-3.5 or GPT-4.\n<a name=\"data-splits\"></a>", "## Data Splits\nThe data is unsplit.\n<a name=\"dataset-creation\"></a>", "# Dataset Creation\n<a name=\"curation-rationale\"></a>", "## Curation Rationale\nThe dataset was created to provide a source of augmented text data for researchers and developers.\nThe datapoints are intended primarily to provide an enhancement of the core FLAN Collection data which relies upon the detailed step by step reasoning capabilities of GPT-3.5 and GPT-4.\nThis \"reasoning trace\" augmentation has demonstrated exceptional results, allowing a LLaMA-13B model trained with this data to rival or beat GPT-3.5 on broad sets of hard reasoning tasks which all models below 100B parameters had previously performed dramatically worse on.\n<a name=\"source-data\"></a>", "## Source Data\nThe data is generated using techniques in alignment with the distributions outlined in the Orca paper, except as noted below:\n1) There is not enough CoT data in the FLAN Collection to generate 150K zero-shot entries, as the paper purports to use.\n We suspect this portion was either undocumented or misrepresented. We have used the ~75K points available.\n2) We used the pre-generated FLAN Collection datasets hosted on HuggingFace under conceptofmind, e.g. conceptofmind/flan2021.\n These are referenced by the official FLAN Collection repo as the preferred data source.\n However, these are a subset of the full FLAN Collection data, and have less than the required entries for the flan2021 and t0 submixes, by ~1.25M and 200k respectively.\nCombined, this gave us ~1.5M fewer datapoints than in the original Orca paper. 
Completing the set is an ongoing work.\n<a name=\"dataset-use\"></a>", "# Dataset Use\n<a name=\"use-cases\"></a>", "## Use Cases\nThe dataset can be used for tasks related to language understanding, natural language processing, machine learning model training, and model performance evaluation.\n<a name=\"usage-caveats\"></a>", "## Usage Caveats\nGiven that this is a work-in-progress dataset, it is recommended to regularly check for updates and improvements.\nFurther, the data should be used in accordance with the guidelines and recommendations outlined in the Orca paper.\n<a name=\"getting-started\"></a>", "## Getting Started\nThis dataset is organized such that it can be naively loaded via Hugging Face datasets library.\nWe recommend using streaming due to the large size of the files.\nRegular updates and data generation progress can be monitored through the OpenOrca repository on Hugging Face." ]
[ "TAGS\n#task_categories-conversational #task_categories-text-classification #task_categories-token-classification #task_categories-table-question-answering #task_categories-question-answering #task_categories-zero-shot-classification #task_categories-summarization #task_categories-feature-extraction #task_categories-text-generation #task_categories-text2text-generation #size_categories-10M<n<100M #language-Korean #license-mit #arxiv-2306.02707 #arxiv-2301.13688 #region-us \n", "# OpenOrca 한국어 번역 데이터셋\n\nGugugo-koen-7B-V1.1을 이용하여 OpenOrca데이터셋을 번역하고 있습니다.\n번역 진행상황은 아래를 참고해 주십시오.", "## 진행상황\n- GPT4 생성물 약 100만 개 중 약 64만 개 번역완료\n- GPT3.5 생성물 약 350만 개 중 약 159만 개 번역완료\n\n데이터셋 사용 후 출처표기는 제작자에게 큰 힘이 됩니다.", "# Original dataset card: OpenOrca", "## Table of Contents\n- Dataset Summary\n- Dataset Attribution\n- Supported Tasks and Leaderboards\n- Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n- Dataset Use\n - Use Cases\n - Usage Caveats\n - Getting Started\n\n\n<p><h1> The OpenOrca Dataset! </h1></p>\n\n!OpenOrca Logo\n\n<a name=\"dataset-announcement\"></a>\n\nWe are thrilled to announce the release of the OpenOrca dataset!\nThis rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper.\nIt has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers!", "# Official Models", "## Mistral-7B-OpenOrca\n\nOur latest model, the first 7B to score better overall than all previous models below 30B.\n98% of Llama2-70b-chat's performance, in a completely open 7B!", "## OpenOrca-Platypus2-13B\n\nOur third model, the first 13B model to score higher than LLaMA1-65B on the HuggingFace Leaderboard!\nReleased in partnership with Platypus.", "## LlongOrca 7B & 13B\n\n* Our first 7B release, trained on top of LLongMA2 to achieve 16,000 tokens context. #1 long context 7B model at release time, with >99% of the overall #1 model's performance.\n* LlongOrca-13B-16k, trained on top of LLongMA2. #1 long context 13B model at release time, with >97% of the overall #1 model's performance.", "## OpenOrcaxOpenChat-Preview2-13B\n\nOur second model, highlighting that we've surpassed the performance reported in the Orca paper.\nWas #1 at release time, now surpassed by our own OpenOrca-Platypus2-13B.\nReleased in partnership with OpenChat.", "## OpenOrca-Preview1-13B\n\nOpenOrca-Preview1-13B\nThis model was trained in less than a day, for <$200, with <10% of our data.\nAt release, it beat the current state of the art models on BigBench-Hard and AGIEval. 
Achieves ~60% of the improvements reported in the Orca paper.\n\n<a name=\"dataset-summary\"></a>", "# Dataset Summary\n\nThe OpenOrca dataset is a collection of augmented FLAN Collection data.\nCurrently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions.\nIt is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope.\nThe data is primarily used for training and evaluation in the field of natural language processing.\n\n<a name=\"dataset-attribution\"></a>", "# Dataset Attribution\n\nWe would like to give special recognition to the following contributors for their significant efforts and dedication:\n \n Teknium \n WingLian/Caseus\n Eric Hartford\n NanoBit\n Pankaj\n Winddude\n Rohan\n URL:\n Autometa\n Entropi\n AtlasUnified\n NeverendingToast\n NanoBit\n WingLian/Caseus\nAlso of course, as always, TheBloke, for being the backbone of the whole community.\n\nMany thanks to NanoBit and Caseus, makers of Axolotl, for lending us their expertise on the platform that developed and trained manticore, minotaur, and many others! \n\nWe are welcoming sponsors or collaborators to help us build these models to the scale they deserve. Please reach out via our socials:\nURL URL\n\nWant to visualize our full dataset? Check out our Nomic Atlas Map.\n <img src=\"URL alt=\"Atlas Nomic Dataset Map\" width=\"400\" height=\"400\" />\n\n\n<a name=\"supported-tasks-and-leaderboards\"></a>", "# Supported Tasks and Leaderboards\n\nThis dataset supports a range of tasks including language modeling, text generation, and text augmentation.\nIt has been instrumental in the generation of multiple high-performing model checkpoints which have exhibited exceptional performance in our unit testing.\nFurther information on leaderboards will be updated as they become available.\n\n<a name=\"languages\"></a>", "# Languages\n\nThe language of the data is primarily English.\n\n<a name=\"dataset-structure\"></a>", "# Dataset Structure\n\n<a name=\"data-instances\"></a>", "## Data Instances\n\nA data instance in this dataset represents entries from the FLAN collection which have been augmented by submitting the listed question to either GPT-4 or GPT-3.5.\nThe response is then entered into the response field.\n\n<a name=\"data-fields\"></a>", "## Data Fields\n\nThe fields are:\n1) 'id', a unique numbered identifier which includes one of 'niv', 't0', 'cot', or 'flan' to represent which source FLAN Collection submix the 'question' is sourced from.\n2) 'system_prompt', representing the System Prompt presented to the GPT-3.5 or GPT-4 API for the datapoint\n3) 'question', representing a question entry as provided by the FLAN Collection\n4) 'response', a response to that question received from a query to either GPT-3.5 or GPT-4.\n<a name=\"data-splits\"></a>", "## Data Splits\nThe data is unsplit.\n<a name=\"dataset-creation\"></a>", "# Dataset Creation\n<a name=\"curation-rationale\"></a>", "## Curation Rationale\nThe dataset was created to provide a source of augmented text data for researchers and developers.\nThe datapoints are intended primarily to provide an enhancement of the core FLAN Collection data which relies upon the detailed step by step reasoning capabilities of GPT-3.5 and GPT-4.\nThis \"reasoning trace\" augmentation has demonstrated exceptional results, allowing a LLaMA-13B model trained with this data to rival or beat GPT-3.5 on broad sets of hard reasoning tasks which all 
models below 100B parameters had previously performed dramatically worse on.\n<a name=\"source-data\"></a>", "## Source Data\nThe data is generated using techniques in alignment with the distributions outlined in the Orca paper, except as noted below:\n1) There is not enough CoT data in the FLAN Collection to generate 150K zero-shot entries, as the paper purports to use.\n We suspect this portion was either undocumented or misrepresented. We have used the ~75K points available.\n2) We used the pre-generated FLAN Collection datasets hosted on HuggingFace under conceptofmind, e.g. conceptofmind/flan2021.\n These are referenced by the official FLAN Collection repo as the preferred data source.\n However, these are a subset of the full FLAN Collection data, and have less than the required entries for the flan2021 and t0 submixes, by ~1.25M and 200k respectively.\nCombined, this gave us ~1.5M fewer datapoints than in the original Orca paper. Completing the set is an ongoing work.\n<a name=\"dataset-use\"></a>", "# Dataset Use\n<a name=\"use-cases\"></a>", "## Use Cases\nThe dataset can be used for tasks related to language understanding, natural language processing, machine learning model training, and model performance evaluation.\n<a name=\"usage-caveats\"></a>", "## Usage Caveats\nGiven that this is a work-in-progress dataset, it is recommended to regularly check for updates and improvements.\nFurther, the data should be used in accordance with the guidelines and recommendations outlined in the Orca paper.\n<a name=\"getting-started\"></a>", "## Getting Started\nThis dataset is organized such that it can be naively loaded via Hugging Face datasets library.\nWe recommend using streaming due to the large size of the files.\nRegular updates and data generation progress can be monitored through the OpenOrca repository on Hugging Face." ]
[ 163, 44, 55, 9, 199, 4, 49, 48, 98, 67, 95, 122, 233, 86, 25, 19, 67, 153, 24, 18, 146, 235, 16, 46, 70, 66 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-classification #task_categories-token-classification #task_categories-table-question-answering #task_categories-question-answering #task_categories-zero-shot-classification #task_categories-summarization #task_categories-feature-extraction #task_categories-text-generation #task_categories-text2text-generation #size_categories-10M<n<100M #language-Korean #license-mit #arxiv-2306.02707 #arxiv-2301.13688 #region-us \n# OpenOrca 한국어 번역 데이터셋\n\nGugugo-koen-7B-V1.1을 이용하여 OpenOrca데이터셋을 번역하고 있습니다.\n번역 진행상황은 아래를 참고해 주십시오.## 진행상황\n- GPT4 생성물 약 100만 개 중 약 64만 개 번역완료\n- GPT3.5 생성물 약 350만 개 중 약 159만 개 번역완료\n\n데이터셋 사용 후 출처표기는 제작자에게 큰 힘이 됩니다.# Original dataset card: OpenOrca## Table of Contents\n- Dataset Summary\n- Dataset Attribution\n- Supported Tasks and Leaderboards\n- Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n- Dataset Use\n - Use Cases\n - Usage Caveats\n - Getting Started\n\n\n<p><h1> The OpenOrca Dataset! </h1></p>\n\n!OpenOrca Logo\n\n<a name=\"dataset-announcement\"></a>\n\nWe are thrilled to announce the release of the OpenOrca dataset!\nThis rich collection of augmented FLAN data aligns, as best as possible, with the distributions outlined in the Orca paper.\nIt has been instrumental in generating high-performing model checkpoints and serves as a valuable resource for all NLP researchers and developers!# Official Models", "passage: ## Mistral-7B-OpenOrca\n\nOur latest model, the first 7B to score better overall than all previous models below 30B.\n98% of Llama2-70b-chat's performance, in a completely open 7B!## OpenOrca-Platypus2-13B\n\nOur third model, the first 13B model to score higher than LLaMA1-65B on the HuggingFace Leaderboard!\nReleased in partnership with Platypus.## LlongOrca 7B & 13B\n\n* Our first 7B release, trained on top of LLongMA2 to achieve 16,000 tokens context. #1 long context 7B model at release time, with >99% of the overall #1 model's performance.\n* LlongOrca-13B-16k, trained on top of LLongMA2. #1 long context 13B model at release time, with >97% of the overall #1 model's performance.## OpenOrcaxOpenChat-Preview2-13B\n\nOur second model, highlighting that we've surpassed the performance reported in the Orca paper.\nWas #1 at release time, now surpassed by our own OpenOrca-Platypus2-13B.\nReleased in partnership with OpenChat.## OpenOrca-Preview1-13B\n\nOpenOrca-Preview1-13B\nThis model was trained in less than a day, for <$200, with <10% of our data.\nAt release, it beat the current state of the art models on BigBench-Hard and AGIEval. 
Achieves ~60% of the improvements reported in the Orca paper.\n\n<a name=\"dataset-summary\"></a># Dataset Summary\n\nThe OpenOrca dataset is a collection of augmented FLAN Collection data.\nCurrently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions.\nIt is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope.\nThe data is primarily used for training and evaluation in the field of natural language processing.\n\n<a name=\"dataset-attribution\"></a>", "passage: # Dataset Attribution\n\nWe would like to give special recognition to the following contributors for their significant efforts and dedication:\n \n Teknium \n WingLian/Caseus\n Eric Hartford\n NanoBit\n Pankaj\n Winddude\n Rohan\n URL:\n Autometa\n Entropi\n AtlasUnified\n NeverendingToast\n NanoBit\n WingLian/Caseus\nAlso of course, as always, TheBloke, for being the backbone of the whole community.\n\nMany thanks to NanoBit and Caseus, makers of Axolotl, for lending us their expertise on the platform that developed and trained manticore, minotaur, and many others! \n\nWe are welcoming sponsors or collaborators to help us build these models to the scale they deserve. Please reach out via our socials:\nURL URL\n\nWant to visualize our full dataset? Check out our Nomic Atlas Map.\n <img src=\"URL alt=\"Atlas Nomic Dataset Map\" width=\"400\" height=\"400\" />\n\n\n<a name=\"supported-tasks-and-leaderboards\"></a># Supported Tasks and Leaderboards\n\nThis dataset supports a range of tasks including language modeling, text generation, and text augmentation.\nIt has been instrumental in the generation of multiple high-performing model checkpoints which have exhibited exceptional performance in our unit testing.\nFurther information on leaderboards will be updated as they become available.\n\n<a name=\"languages\"></a># Languages\n\nThe language of the data is primarily English.\n\n<a name=\"dataset-structure\"></a># Dataset Structure\n\n<a name=\"data-instances\"></a>## Data Instances\n\nA data instance in this dataset represents entries from the FLAN collection which have been augmented by submitting the listed question to either GPT-4 or GPT-3.5.\nThe response is then entered into the response field.\n\n<a name=\"data-fields\"></a>## Data Fields\n\nThe fields are:\n1) 'id', a unique numbered identifier which includes one of 'niv', 't0', 'cot', or 'flan' to represent which source FLAN Collection submix the 'question' is sourced from.\n2) 'system_prompt', representing the System Prompt presented to the GPT-3.5 or GPT-4 API for the datapoint\n3) 'question', representing a question entry as provided by the FLAN Collection\n4) 'response', a response to that question received from a query to either GPT-3.5 or GPT-4.\n<a name=\"data-splits\"></a>## Data Splits\nThe data is unsplit.\n<a name=\"dataset-creation\"></a># Dataset Creation\n<a name=\"curation-rationale\"></a>" ]
e73abef53771d1ec02298342ab51590134ec3d53
# Dataset Card for "agent_action_small" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Raihan004/agent_action_small
[ "region:us" ]
2023-11-04T06:20:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "\u0995\u09c1\u0995\u09c1\u09b0_\u0996\u09c7\u09b2\u09be_\u0995\u09b0\u09be", "1": "\u099b\u09c7\u09b2\u09c7_\u0995\u09ae\u09cd\u09aa\u09bf\u0989\u099f\u09be\u09b0_\u09ac\u09cd\u09af\u09ac\u09b9\u09be\u09b0_\u0995\u09b0\u09be", "2": "\u099b\u09c7\u09b2\u09c7_\u0996\u09c7\u09b2\u09be_\u0995\u09b0\u09be", "3": "\u09ac\u09bf\u09a1\u09bc\u09be\u09b2_\u0996\u09c7\u09b2\u09be_\u0995\u09b0\u09be", "4": "\u09ae\u09c7\u09af\u09bc\u09c7_\u0995\u09ae\u09cd\u09aa\u09bf\u0989\u099f\u09be\u09b0_\u09ac\u09cd\u09af\u09ac\u09b9\u09be\u09b0_\u0995\u09b0\u09be", "5": "\u09ae\u09c7\u09af\u09bc\u09c7_\u0996\u09c7\u09b2\u09be_\u0995\u09b0\u09be"}}}}], "splits": [{"name": "train", "num_bytes": 40805324.8982036, "num_examples": 709}, {"name": "test", "num_bytes": 7953854.101796407, "num_examples": 126}], "download_size": 48559260, "dataset_size": 48759179.0}}
2023-11-05T04:33:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "agent_action_small" More Information needed
[ "# Dataset Card for \"agent_action_small\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"agent_action_small\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"agent_action_small\"\n\nMore Information needed" ]
d084a3be906c714f227fe1e83b565656860dbd47
# Dataset Card for "bn_txt_samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sawradip/bn-text-subcorpus
[ "region:us" ]
2023-11-04T06:46:44+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "length", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 157918032, "num_examples": 500000}], "download_size": 71368458, "dataset_size": 157918032}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T06:46:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bn_txt_samples" More Information needed
[ "# Dataset Card for \"bn_txt_samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bn_txt_samples\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bn_txt_samples\"\n\nMore Information needed" ]
701af70005fec816ef679184e22568a7ec8d3d29
# Dataset Card for "instruction-gpt-3.5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
psyche/instruction-gpt-3.5
[ "region:us" ]
2023-11-04T07:03:14+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "gpt-3.5-turbo", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6449418, "num_examples": 5884}], "download_size": 3445248, "dataset_size": 6449418}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T07:03:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "instruction-gpt-3.5" More Information needed
[ "# Dataset Card for \"instruction-gpt-3.5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"instruction-gpt-3.5\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"instruction-gpt-3.5\"\n\nMore Information needed" ]
820a8b8aa5df64239172a052663749b1effc9676
This repository is a mirror of https://rimbawatchmy.com/databases ## dataset 1. Deforestation 2017-2021 (Geospatial Data) 2. Deforestation 2017-2021 (Full Text Data) 3. Miscellaneous Data (Geospatial Data) 4. Miscellaneous Data (Full Text Data) 5. Approved Forest-Risk EIAs (Geospatial Data) 6. Approved Forest-Risk EIAs (Full Text Data) 7. Real Estate Listings of Forested Land (Geospatial Data) 8. Real Estate Listings of Forested Land (Full Text Data) 9. Real Estate Listings of Forested Land (Listings) 10. Forest Reserve Degazettement (Geospatial Data) 11. Forest Reserve Degazettement (Full Text Data) 12. Zoning of Forests for Non-Forest Use (Geospatial Data) 13. Zoning of Forests for Non-Forest Use (Full Text Data) 14. Indigenous Land Rights Conflicts (Geospatial Data) 15. Indigenous Land Rights Conflicts (Full Text Data) 16. Indigenous Villages and Customary Territories (Geospatial Data) 17. Forest Reserves (Geospatial Data) 18. Central Forest Spine Connections, 2022 (Geospatial Data)
malaysia-ai/forestwatch-mirror
[ "language:ms", "region:us" ]
2023-11-04T07:36:31+00:00
{"language": ["ms"]}
2023-11-04T07:40:48+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
This repository is a mirror of URL ## dataset 1. Deforestation 2017-2021 (Geospatial Data) 2. Deforestation 2017-2021 (Full Text Data) 3. Miscellaneous Data (Geospatial Data) 4. Miscellaneous Data (Full Text Data) 5. Approved Forest-Risk EIAs (Geospatial Data) 6. Approved Forest-Risk EIAs (Full Text Data) 7. Real Estate Listings of Forested Land (Geospatial Data) 8. Real Estate Listings of Forested Land (Full Text Data) 9. Real Estate Listings of Forested Land (Listings) 10. Forest Reserve Degazettement (Geospatial Data) 11. Forest Reserve Degazettement (Full Text Data) 12. Zoning of Forests for Non-Forest Use (Geospatial Data) 13. Zoning of Forests for Non-Forest Use (Full Text Data) 14. Indigenous Land Rights Conflicts (Geospatial Data) 15. Indigenous Land Rights Conflicts (Full Text Data) 16. Indigenous Villages and Customary Territories (Geospatial Data) 17. Forest Reserves (Geospatial Data) 18. Central Forest Spine Connections, 2022 (Geospatial Data)
[ "## dataset\n\n1. Deforestation 2017-2021 (Geospatial Data)\n2. Deforestation 2017-2021 (Full Text Data)\n3. Miscellaneous Data (Geospatial Data)\n4. Miscellaneous Data (Full Text Data)\n5. Approved Forest-Risk EIAs (Geospatial Data)\n6. Approved Forest-Risk EIAs (Full Text Data)\n7. Real Estate Listings of Forested Land (Geospatial Data)\n8. Real Estate Listings of Forested Land (Full Text Data)\n9. Real Estate Listings of Forested Land (Listings)\n10. Forest Reserve Degazettement (Geospatial Data)\n11. Forest Reserve Degazettement (Full Text Data)\n12. Zoning of Forests for Non-Forest Use (Geospatial Data)\n13. Zoning of Forests for Non-Forest Use (Full Text Data)\n14. Indigenous Land Rights Conflicts (Geospatial Data)\n15. Indigenous Land Rights Conflicts (Full Text Data)\n16. Indigenous Villages and Customary Territories (Geospatial Data)\n17. Forest Reserves (Geospatial Data)\n18. Central Forest Spine Connections, 2022 (Geospatial Data)" ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "## dataset\n\n1. Deforestation 2017-2021 (Geospatial Data)\n2. Deforestation 2017-2021 (Full Text Data)\n3. Miscellaneous Data (Geospatial Data)\n4. Miscellaneous Data (Full Text Data)\n5. Approved Forest-Risk EIAs (Geospatial Data)\n6. Approved Forest-Risk EIAs (Full Text Data)\n7. Real Estate Listings of Forested Land (Geospatial Data)\n8. Real Estate Listings of Forested Land (Full Text Data)\n9. Real Estate Listings of Forested Land (Listings)\n10. Forest Reserve Degazettement (Geospatial Data)\n11. Forest Reserve Degazettement (Full Text Data)\n12. Zoning of Forests for Non-Forest Use (Geospatial Data)\n13. Zoning of Forests for Non-Forest Use (Full Text Data)\n14. Indigenous Land Rights Conflicts (Geospatial Data)\n15. Indigenous Land Rights Conflicts (Full Text Data)\n16. Indigenous Villages and Customary Territories (Geospatial Data)\n17. Forest Reserves (Geospatial Data)\n18. Central Forest Spine Connections, 2022 (Geospatial Data)" ]
[ 16, 265 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n## dataset\n\n1. Deforestation 2017-2021 (Geospatial Data)\n2. Deforestation 2017-2021 (Full Text Data)\n3. Miscellaneous Data (Geospatial Data)\n4. Miscellaneous Data (Full Text Data)\n5. Approved Forest-Risk EIAs (Geospatial Data)\n6. Approved Forest-Risk EIAs (Full Text Data)\n7. Real Estate Listings of Forested Land (Geospatial Data)\n8. Real Estate Listings of Forested Land (Full Text Data)\n9. Real Estate Listings of Forested Land (Listings)\n10. Forest Reserve Degazettement (Geospatial Data)\n11. Forest Reserve Degazettement (Full Text Data)\n12. Zoning of Forests for Non-Forest Use (Geospatial Data)\n13. Zoning of Forests for Non-Forest Use (Full Text Data)\n14. Indigenous Land Rights Conflicts (Geospatial Data)\n15. Indigenous Land Rights Conflicts (Full Text Data)\n16. Indigenous Villages and Customary Territories (Geospatial Data)\n17. Forest Reserves (Geospatial Data)\n18. Central Forest Spine Connections, 2022 (Geospatial Data)" ]
29941543ca41bba117765f1e2637a8ab09f33cc5
* website: [jurnal-malaysia](https://jurnal-malaysia.com/) * Number of pages scraped: 20 * Number of posts scraped: 1938 * Link to dataset on [Huggingface](https://huggingface.co/datasets/haizad/jurnal-malaysia-scraped)
haizad/jurnal-malaysia-scraped
[ "language:ms", "region:us" ]
2023-11-04T10:12:33+00:00
{"language": ["ms"]}
2023-11-04T10:22:57+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
* website: jurnal-malaysia * Number of pages scraped: 20 * Number of posts scraped: 1938 * Link to dataset on Huggingface
[]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n" ]
[ 16 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n" ]
2c5ca3c7efb32815b7776391a335f4a5c4ec3e0b
# Dataset Card for "dataset-farma-version1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marziye-A/dataset-farma-version1
[ "region:us" ]
2023-11-04T10:33:15+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 73044576.0, "num_examples": 1980}], "download_size": 71493318, "dataset_size": 73044576.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T13:07:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset-farma-version1" More Information needed
[ "# Dataset Card for \"dataset-farma-version1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset-farma-version1\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset-farma-version1\"\n\nMore Information needed" ]
f92538760dfa9a60bd476493873107e657d434d8
# Dataset Card for "ljspeech" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dinhbinh161/ljspeech
[ "region:us" ]
2023-11-04T10:38:56+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 22050}}}, {"name": "file", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "normalized_text", "dtype": "string"}, {"name": "duration", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 3860331368.0, "num_examples": 13100}], "download_size": 3786374077, "dataset_size": 3860331368.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T10:46:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ljspeech" More Information needed
[ "# Dataset Card for \"ljspeech\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ljspeech\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ljspeech\"\n\nMore Information needed" ]
ddecaa2a4db4755aa36f6f97a2e0912dd8d51474
# Dataset Card for "mutability_classifier-1-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
coastalcph/mutability_classifier-1-1
[ "region:us" ]
2023-11-04T11:14:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "answer", "list": [{"name": "wikidata_id", "dtype": "string"}, {"name": "name", "dtype": "string"}]}, {"name": "id", "dtype": "string"}, {"name": "relation", "dtype": "string"}, {"name": "date", "dtype": "int64"}, {"name": "type", "dtype": "string"}, {"name": "is_mutable", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1095030.2883583691, "num_examples": 6230}, {"name": "validation", "num_bytes": 995487.3818577483, "num_examples": 5783}, {"name": "test", "num_bytes": 858144.5198522622, "num_examples": 4360}], "download_size": 1062216, "dataset_size": 2948662.19006838}}
2023-11-04T11:14:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mutability_classifier-1-1" More Information needed
[ "# Dataset Card for \"mutability_classifier-1-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mutability_classifier-1-1\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mutability_classifier-1-1\"\n\nMore Information needed" ]
3d7819783e40b5e5961485e4d3971ea524b2aa1c
# Dataset Card for "mutability_classifier-1-n" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
coastalcph/mutability_classifier-1-n
[ "region:us" ]
2023-11-04T11:14:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "answer", "list": [{"name": "wikidata_id", "dtype": "string"}, {"name": "name", "dtype": "string"}]}, {"name": "id", "dtype": "string"}, {"name": "relation", "dtype": "string"}, {"name": "date", "dtype": "int64"}, {"name": "type", "dtype": "string"}, {"name": "is_mutable", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1199436.065450644, "num_examples": 6824}, {"name": "validation", "num_bytes": 1017521.3408544267, "num_examples": 5911}, {"name": "test", "num_bytes": 837675.0175438597, "num_examples": 4256}], "download_size": 1322347, "dataset_size": 3054632.4238489303}}
2023-11-04T11:14:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mutability_classifier-1-n" More Information needed
[ "# Dataset Card for \"mutability_classifier-1-n\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mutability_classifier-1-n\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mutability_classifier-1-n\"\n\nMore Information needed" ]
1c129823b763a27ad690bb74c3577e4e4eaef387
Dataset used to train this project https://www.youtube.com/watch?v=2z3iV9BN94c --- Contact --- @RoniBandini https://www.linkedin.com/in/ronibandini/
ronibandini/AmericanSignLanguage
[ "license:mit", "region:us" ]
2023-11-04T11:34:33+00:00
{"license": "mit"}
2023-11-06T12:58:06+00:00
[]
[]
TAGS #license-mit #region-us
Dataset used to train this project URL --- Contact --- @RoniBandini URL
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
6fcaecb564acb715417b1bc77ff1423699a86186
# Dataset Card for "LLM_DATASET" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
GHOFRANEE/LLM_DATASET
[ "region:us" ]
2023-11-04T11:48:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 548281, "num_examples": 60}, {"name": "validation", "num_bytes": 198365, "num_examples": 20}], "download_size": 318586, "dataset_size": 746646}}
2023-11-04T11:48:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LLM_DATASET" More Information needed
[ "# Dataset Card for \"LLM_DATASET\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LLM_DATASET\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LLM_DATASET\"\n\nMore Information needed" ]
8f2bd825d60c8d6da144f79154809a930d2efb8e
# Dataset Card for "FLD_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cestwc/FLD_1
[ "region:us" ]
2023-11-04T11:56:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "hypothesis", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "hypothesis_formula", "dtype": "string"}, {"name": "context_formula", "dtype": "string"}, {"name": "proofs", "sequence": "string"}, {"name": "proof_label", "dtype": "string"}, {"name": "proofs_formula", "sequence": "string"}, {"name": "world_assump_label", "dtype": "string"}, {"name": "original_tree_depth", "dtype": "int64"}, {"name": "depth", "dtype": "int64"}, {"name": "num_formula_distractors", "dtype": "int64"}, {"name": "num_translation_distractors", "dtype": "int64"}, {"name": "num_all_distractors", "dtype": "int64"}, {"name": "negative_hypothesis", "dtype": "string"}, {"name": "negative_hypothesis_formula", "dtype": "string"}, {"name": "negative_original_tree_depth", "dtype": "int64"}, {"name": "negative_proofs", "sequence": "string"}, {"name": "negative_proof_label", "dtype": "string"}, {"name": "negative_world_assump_label", "dtype": "string"}, {"name": "prompt_serial", "dtype": "string"}, {"name": "proof_serial", "dtype": "string"}, {"name": "version", "dtype": "string"}, {"name": "premise", "dtype": "string"}, {"name": "assumptions", "sequence": "string"}, {"name": "paraphrased_premises", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 111376663, "num_examples": 30000}, {"name": "validation", "num_bytes": 18529236, "num_examples": 5000}, {"name": "test", "num_bytes": 18529220, "num_examples": 5000}], "download_size": 56252409, "dataset_size": 148435119}}
2023-11-04T11:56:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "FLD_1" More Information needed
[ "# Dataset Card for \"FLD_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"FLD_1\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"FLD_1\"\n\nMore Information needed" ]
a10216ca4c9903509a27ed03e3c20619bbd32e6a
Teknofest NLP text classification data
WillyWilliam/teknofest-nlp-data
[ "region:us" ]
2023-11-04T11:57:56+00:00
{}
2023-11-04T11:59:50+00:00
[]
[]
TAGS #region-us
Teknofest NLP text classification data
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
51238f7c8097f47afb8551518f635425da129c36
# Dataset Card for "instruction_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fiveflow/instruction_data
[ "region:us" ]
2023-11-04T13:03:42+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28123766, "num_examples": 44905}], "download_size": 15302646, "dataset_size": 28123766}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T07:55:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "instruction_data" More Information needed
[ "# Dataset Card for \"instruction_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"instruction_data\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"instruction_data\"\n\nMore Information needed" ]
4bd49e8bbe44ba086d8a3071967817fcb70759c4
# Dataset Card for "cnn_news_ptbr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celsowm/cnn_news_ptbr
[ "region:us" ]
2023-11-04T13:04:04+00:00
{"dataset_info": {"features": [{"name": "titulo", "dtype": "string"}, {"name": "texto", "dtype": "string"}, {"name": "link", "dtype": "string"}, {"name": "resumo", "dtype": "string"}, {"name": "categoria", "dtype": "string"}, {"name": "data_hora", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9586869, "num_examples": 3877}], "download_size": 5402043, "dataset_size": 9586869}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-01T03:13:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cnn_news_ptbr" More Information needed
[ "# Dataset Card for \"cnn_news_ptbr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cnn_news_ptbr\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cnn_news_ptbr\"\n\nMore Information needed" ]
2fe41aa8550a481fab667ec439cc6811de36845a
## About dataset We constructed this dataset for our study, which investigates the correlation between GitHub communication metrics and citation counts, examining the potential of these metrics as an altmetric. Currently, it contains about 12,000 samples of publications published at top-tier AI conferences. The citation counts and the corresponding GitHub metrics may need to be updated; we will do our best to add more conferences and keep the values up to date. ### Target conferences We collect publications from 2018 to 2022 from the following conferences: - CVPR - ECCV - ICML - ICLR - NeurIPS The following conferences are yet to be included: - ICCV - ACL - EMNLP - NAACL - AAAI - INTERSPEECH - ICASSP ## Contributors Please note that most contributors to this project majored in library science, so we may lack deeper expertise in the ML/DL and AI fields. - [@yklikesyou](https://huggingface.co/yklikesyou) - Shinhye Cha - [@deepkyu](https://huggingface.co/deepkyu)
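To make the correlation analysis concrete, a minimal sketch is below. The column names and split name are assumptions (the card does not document the schema), so inspect the columns before relying on it.

```python
from datasets import load_dataset

# Tabular data from this repository (ID taken from the card);
# the "train" split name is an assumption.
df = load_dataset("deepkyu/github-as-altmetric", split="train").to_pandas()
print(df.columns)  # check the real schema first

# HYPOTHETICAL column names -- replace with the actual GitHub-metric
# and citation-count columns found above.
metric_col, target_col = "github_stars", "citation_count"

# Spearman rank correlation is robust to the heavy skew typical of
# star and citation counts.
print(df[metric_col].corr(df[target_col], method="spearman"))
```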
deepkyu/github-as-altmetric
[ "task_categories:tabular-regression", "language:en", "license:apache-2.0", "region:us" ]
2023-11-04T13:10:03+00:00
{"language": ["en"], "license": "apache-2.0", "task_categories": ["tabular-regression"]}
2023-11-05T05:55:44+00:00
[]
[ "en" ]
TAGS #task_categories-tabular-regression #language-English #license-apache-2.0 #region-us
## About dataset We constructed this dataset for our study, which investigates the correlation between GitHub communication metrics and citation counts, examining the potential of these metrics as an altmetric. Currently, it contains about 12,000 samples of publications published at top-tier AI conferences. The citation counts and the corresponding GitHub metrics may need to be updated; we will do our best to add more conferences and keep the values up to date. ### Target conferences We collect publications from 2018 to 2022 from the following conferences: - CVPR - ECCV - ICML - ICLR - NeurIPS The following conferences are yet to be included: - ICCV - ACL - EMNLP - NAACL - AAAI - INTERSPEECH - ICASSP ## Contributors Please note that most contributors to this project majored in library science, so we may lack deeper expertise in the ML/DL and AI fields. - @yklikesyou - Shinhye Cha - @deepkyu
[ "## About dataset\n\nWe construct this dataset for our study, which investigates the correlation between GitHub communication metrics and citation counts, examining the potential of the metrics as an altmetric. \n\nCurrently, it contains about 12,000 samples of publications, which are published by top-tier AI conferences. \nThe citation counts and the corresponding GitHub metrics might need to be updated. \nWe strive our best to update more conferences and keep values up-to-date.", "### Target conferences\n\nWe collect publications from 2018 to 2022 in the following conferences:\n\n- CVPR\n- ECCV\n- ICML\n- ICLR\n- NeurIPS\n\nThose are conferences which are to be included.\n\n- ICCV\n- ACL\n- EMNLP\n- NAACL\n- AAAI\n- INTERSPEECH\n- ICASSP", "## Contributors\n\nPlease note that most contributors in this project major in library science, so there might be a need for more knowledge about ML/DL and AI fields.\n\n- @yklikesyou\n- Shinhye Cha\n- @deepkyu" ]
[ "TAGS\n#task_categories-tabular-regression #language-English #license-apache-2.0 #region-us \n", "## About dataset\n\nWe construct this dataset for our study, which investigates the correlation between GitHub communication metrics and citation counts, examining the potential of the metrics as an altmetric. \n\nCurrently, it contains about 12,000 samples of publications, which are published by top-tier AI conferences. \nThe citation counts and the corresponding GitHub metrics might need to be updated. \nWe strive our best to update more conferences and keep values up-to-date.", "### Target conferences\n\nWe collect publications from 2018 to 2022 in the following conferences:\n\n- CVPR\n- ECCV\n- ICML\n- ICLR\n- NeurIPS\n\nThose are conferences which are to be included.\n\n- ICCV\n- ACL\n- EMNLP\n- NAACL\n- AAAI\n- INTERSPEECH\n- ICASSP", "## Contributors\n\nPlease note that most contributors in this project major in library science, so there might be a need for more knowledge about ML/DL and AI fields.\n\n- @yklikesyou\n- Shinhye Cha\n- @deepkyu" ]
[ 30, 110, 72, 54 ]
[ "passage: TAGS\n#task_categories-tabular-regression #language-English #license-apache-2.0 #region-us \n## About dataset\n\nWe construct this dataset for our study, which investigates the correlation between GitHub communication metrics and citation counts, examining the potential of the metrics as an altmetric. \n\nCurrently, it contains about 12,000 samples of publications, which are published by top-tier AI conferences. \nThe citation counts and the corresponding GitHub metrics might need to be updated. \nWe strive our best to update more conferences and keep values up-to-date.### Target conferences\n\nWe collect publications from 2018 to 2022 in the following conferences:\n\n- CVPR\n- ECCV\n- ICML\n- ICLR\n- NeurIPS\n\nThose are conferences which are to be included.\n\n- ICCV\n- ACL\n- EMNLP\n- NAACL\n- AAAI\n- INTERSPEECH\n- ICASSP## Contributors\n\nPlease note that most contributors in this project major in library science, so there might be a need for more knowledge about ML/DL and AI fields.\n\n- @yklikesyou\n- Shinhye Cha\n- @deepkyu" ]
783e9638dda792412fa154ef7b478d63421f925d
# Dataset Card for "HealthAdvice_Dutch_translated_with_MariaNMT" Translation of the **English** version of [HealthAdvice](https://huggingface.co/datasets/medalpaca/medical_meadow_health_advice), to **Dutch** using an [Maria NMT model](https://marian-nmt.github.io/), trained by [Helsinki NLP](https://huggingface.co/Helsinki-NLP/opus-mt-en-nl). Note, for reference: Maria NMT is based on [BART](https://huggingface.co/docs/transformers/model_doc/bart), described [here](https://arxiv.org/abs/1910.13461). # Attribution If you use this dataset please use the following to credit the creators of the Health Advice corpus: ```citation @inproceedings{yu-etal-2019-detecting, title = "Detecting Causal Language Use in Science Findings", author = "Yu, Bei and Li, Yingya and Wang, Jun", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D19-1473", doi = "10.18653/v1/D19-1473", pages = "4664--4674", } ``` The creators of the OPUS-MT models: ``` @InProceedings{TiedemannThottingal:EAMT2020, author = {J{\"o}rg Tiedemann and Santhosh Thottingal}, title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld}, booktitle = {Proceedings of the 22nd Annual Conferenec of the European Association for Machine Translation (EAMT)}, year = {2020}, address = {Lisbon, Portugal} } ``` and ``` @misc {van_es_2023, author = { {Bram van Es} }, title = { HealthAdvice_Dutch_translated_with_MariaNMT (Revision ae2436a) }, year = 2023, url = { https://huggingface.co/datasets/UMCU/HealthAdvice_Dutch_translated_with_MariaNMT }, doi = { 10.57967/hf/1481 }, publisher = { Hugging Face } } ``` # License For both the Maria NMT model and the original [Helsinki NLP](https://twitter.com/HelsinkiNLP) [Opus MT model](https://huggingface.co/Helsinki-NLP) we did **not** find a license. We also did not find a license for the HealthAdvice corpus. For these reasons we use an academic free license v3. license. If this was in error please let us know and we will add the appropriate licensing promptly.
UMCU/HealthAdvice_Dutch_translated_with_MariaNMT
[ "task_categories:text-classification", "task_categories:question-answering", "task_categories:sentence-similarity", "size_categories:1K<n<10K", "language:nl", "license:afl-3.0", "healthcare", "arxiv:1910.13461", "doi:10.57967/hf/1481", "region:us" ]
2023-11-04T13:59:00+00:00
{"language": ["nl"], "license": "afl-3.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification", "question-answering", "sentence-similarity"], "pretty_name": "HealthAdvice_Dutch", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2282530, "num_examples": 8384}], "download_size": 931750, "dataset_size": 2282530}, "tags": ["healthcare"]}
2023-12-14T14:34:47+00:00
[ "1910.13461" ]
[ "nl" ]
TAGS #task_categories-text-classification #task_categories-question-answering #task_categories-sentence-similarity #size_categories-1K<n<10K #language-Dutch #license-afl-3.0 #healthcare #arxiv-1910.13461 #doi-10.57967/hf/1481 #region-us
# Dataset Card for "HealthAdvice_Dutch_translated_with_MariaNMT" Translation of the English version of HealthAdvice, to Dutch using an Maria NMT model, trained by Helsinki NLP. Note, for reference: Maria NMT is based on BART, described here. # Attribution If you use this dataset please use the following to credit the creators of the Health Advice corpus: The creators of the OPUS-MT models: and # License For both the Maria NMT model and the original Helsinki NLP Opus MT model we did not find a license. We also did not find a license for the HealthAdvice corpus. For these reasons we use an academic free license v3. license. If this was in error please let us know and we will add the appropriate licensing promptly.
[ "# Dataset Card for \"HealthAdvice_Dutch_translated_with_MariaNMT\"\n\n\nTranslation of the English version of HealthAdvice,\nto Dutch using an Maria NMT model, trained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.", "# Attribution\n\nIf you use this dataset please use the following to credit the creators of the Health Advice corpus:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the HealthAdvice corpus. For these reasons we use an academic free license v3. \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
[ "TAGS\n#task_categories-text-classification #task_categories-question-answering #task_categories-sentence-similarity #size_categories-1K<n<10K #language-Dutch #license-afl-3.0 #healthcare #arxiv-1910.13461 #doi-10.57967/hf/1481 #region-us \n", "# Dataset Card for \"HealthAdvice_Dutch_translated_with_MariaNMT\"\n\n\nTranslation of the English version of HealthAdvice,\nto Dutch using an Maria NMT model, trained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.", "# Attribution\n\nIf you use this dataset please use the following to credit the creators of the Health Advice corpus:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the HealthAdvice corpus. For these reasons we use an academic free license v3. \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
[ 92, 67, 36, 75 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-question-answering #task_categories-sentence-similarity #size_categories-1K<n<10K #language-Dutch #license-afl-3.0 #healthcare #arxiv-1910.13461 #doi-10.57967/hf/1481 #region-us \n# Dataset Card for \"HealthAdvice_Dutch_translated_with_MariaNMT\"\n\n\nTranslation of the English version of HealthAdvice,\nto Dutch using an Maria NMT model, trained by Helsinki NLP.\nNote, for reference: Maria NMT is based on BART, described here.# Attribution\n\nIf you use this dataset please use the following to credit the creators of the Health Advice corpus:\n\n\n\nThe creators of the OPUS-MT models:\n\n\nand# License\n\nFor both the Maria NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the HealthAdvice corpus. For these reasons we use an academic free license v3. \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
fc22ba2b96d6bbb6beb49b5d469af9385f8a6846
## Dataset Description Subtitles from 110 episodes of the anime Boruto, with the best hand-picked Indonesian subtitles. ## How to use 1. Install the datasets library from Hugging Face (plus prettytable, which the table example below needs) using the following command: ```python pip install datasets prettytable ``` 2. Load the dataset using the following code: ```python from datasets import load_dataset dataset = load_dataset("mabzak/anime_subtitle") ``` 3. Example of displaying a table: ```python from prettytable import PrettyTable train_data = dataset['train'] # Take one example record from train_data example_data = train_data[0] # Initialize the table with the record's field names table = PrettyTable(example_data.keys()) # Add the record's values as a row table.add_row(example_data.values()) # Display the table print(table) ```
mabzak/anime_subtitle
[ "task_categories:translation", "size_categories:10K<n<100K", "source_datasets:subscene", "language:id", "language:en", "license:unknown", "region:us" ]
2023-11-04T14:10:24+00:00
{"language": ["id", "en"], "license": ["unknown"], "size_categories": ["10K<n<100K"], "source_datasets": ["subscene"], "task_categories": ["translation"]}
2023-11-04T15:32:30+00:00
[]
[ "id", "en" ]
TAGS #task_categories-translation #size_categories-10K<n<100K #source_datasets-subscene #language-Indonesian #language-English #license-unknown #region-us
## Dataset Description Subtitles from 110 episodes of the anime Boruto, with the best hand-picked Indonesian subtitles. ## How to use 1. Install the datasets library from Hugging Face using the following command: 2. Load the dataset using the following code: 3. Example of displaying a table
[ "## Dataset Description\nSubtitle dari 110 episode anime boruto dengan subtitle indoneisa pilihan yang terbaik", "## How to use\n1. Instal library datasets dari Hugging Face menggunakan perintah berikut:\n\n2. Muat dataset dengan menggunakan kode berikut:\n\n3. Contoh menampilkan tabel" ]
[ "TAGS\n#task_categories-translation #size_categories-10K<n<100K #source_datasets-subscene #language-Indonesian #language-English #license-unknown #region-us \n", "## Dataset Description\nSubtitle dari 110 episode anime boruto dengan subtitle indoneisa pilihan yang terbaik", "## How to use\n1. Instal library datasets dari Hugging Face menggunakan perintah berikut:\n\n2. Muat dataset dengan menggunakan kode berikut:\n\n3. Contoh menampilkan tabel" ]
[ 53, 21, 33 ]
[ "passage: TAGS\n#task_categories-translation #size_categories-10K<n<100K #source_datasets-subscene #language-Indonesian #language-English #license-unknown #region-us \n## Dataset Description\nSubtitle dari 110 episode anime boruto dengan subtitle indoneisa pilihan yang terbaik## How to use\n1. Instal library datasets dari Hugging Face menggunakan perintah berikut:\n\n2. Muat dataset dengan menggunakan kode berikut:\n\n3. Contoh menampilkan tabel" ]
20e5ca833d1a73d87a1047b8bbbc3f68eebdb49d
# MechanisticProbe [![](https://img.shields.io/badge/License-MIT-blue.svg)]() [![arxiv](https://img.shields.io/badge/arXiv-2310.14491-b31b1b)](https://arxiv.org/abs/2310.14491) [![Dataset Download](https://img.shields.io/badge/Download-Datasets-green)](https://huggingface.co/datasets/yyyyifan/MechanisticProbe_ProofWriter_ARC) [![GitHub Project](https://img.shields.io/badge/GitHub-Project-orange)](https://github.com/yifan-h/MechanisticProbe) ##### Processed data for **[Towards a Mechanistic Interpretation of Multi-Step Reasoning Capabilities of Language Models](https://arxiv.org/abs/2310.14491)**
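A minimal loading sketch for the processed data follows; the available configs and splits are not documented in this card, so inspect the returned object before relying on a split name.

```python
from datasets import load_dataset

# Repository ID taken from this card; print the result to see
# which configs/splits are actually available.
ds = load_dataset("yyyyifan/MechanisticProbe_ProofWriter_ARC")
print(ds)
```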
yyyyifan/MechanisticProbe_ProofWriter_ARC
[ "arxiv:2310.14491", "region:us" ]
2023-11-04T14:16:51+00:00
{}
2023-11-04T14:53:10+00:00
[ "2310.14491" ]
[]
TAGS #arxiv-2310.14491 #region-us
# MechanisticProbe ![]() ![arxiv](URL ![Dataset Download](URL ![GitHub Project](URL ##### Processed data for Towards a Mechanistic Interpretation of Multi-Step Reasoning Capabilities of Language Models
[ "# MechanisticProbe\n\n![]()\n![arxiv](URL\n![Dataset Download](URL\n![GitHub Project](URL", "##### Processed data for Towards a Mechanistic Interpretation of Multi-Step Reasoning Capabilities of Language Models" ]
[ "TAGS\n#arxiv-2310.14491 #region-us \n", "# MechanisticProbe\n\n![]()\n![arxiv](URL\n![Dataset Download](URL\n![GitHub Project](URL", "##### Processed data for Towards a Mechanistic Interpretation of Multi-Step Reasoning Capabilities of Language Models" ]
[ 15, 36, 28 ]
[ "passage: TAGS\n#arxiv-2310.14491 #region-us \n# MechanisticProbe\n\n![]()\n![arxiv](URL\n![Dataset Download](URL\n![GitHub Project](URL##### Processed data for Towards a Mechanistic Interpretation of Multi-Step Reasoning Capabilities of Language Models" ]
b836887545e0319f7345b1f63fb1e57a4edc7ab9
* website: [cypherhackz](https://www.cypherhackz.net/) * Number of pages scraped: 9 * Number of posts scraped: 805 * Link to dataset on [Huggingface](https://huggingface.co/datasets/haizad/cypherhackz-scraped)
haizad/cypherhackz-scraped
[ "language:en", "region:us" ]
2023-11-04T14:36:27+00:00
{"language": ["en"]}
2023-11-04T15:02:07+00:00
[]
[ "en" ]
TAGS #language-English #region-us
* website: cypherhackz * Number of pages scraped: 9 * Number of posts scraped: 805 * Link to dataset on Huggingface
[]
[ "TAGS\n#language-English #region-us \n" ]
[ 10 ]
[ "passage: TAGS\n#language-English #region-us \n" ]
40691c330ada0948e154ac7c070070ac590a7e43
# Dataset Card for "quest-under-capricorn" TODO: upload blip2 captions update readme with tSNE UPDATE README change to darc-ai-quc [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mikehemberger/quest-under-capricorn
[ "region:us" ]
2023-11-04T15:16:50+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "quest-under-capricorn-s01-e01-the-desert-gods", "1": "quest-under-capricorn-s01-e02-hermits-of-borroloola", "2": "quest-under-capricorn-s01-e03-buffalo,-geese-and-men", "3": "quest-under-capricorn-s01-e04-the-artists-of-arnhem-land", "4": "quest-under-capricorn-s01-e05-bush-walkabout", "5": "quest-under-capricorn-s01-e06-the-first-australians"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "show_name", "dtype": "string"}, {"name": "relative_path", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 269324911.0, "num_examples": 21207}], "download_size": 266520230, "dataset_size": 269324911.0}}
2023-12-06T17:22:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quest-under-capricorn" TODO: upload blip2 captions update readme with tSNE UPDATE README change to darc-ai-quc More Information needed
[ "# Dataset Card for \"quest-under-capricorn\"\nTODO: upload blip2 captions\nupdate readme with tSNE\nUPDATE README\nchange to darc-ai-quc\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quest-under-capricorn\"\nTODO: upload blip2 captions\nupdate readme with tSNE\nUPDATE README\nchange to darc-ai-quc\n\nMore Information needed" ]
[ 6, 46 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quest-under-capricorn\"\nTODO: upload blip2 captions\nupdate readme with tSNE\nUPDATE README\nchange to darc-ai-quc\n\nMore Information needed" ]
7807e8ff6e0855aa9fb6d7d0d50ed7b19da74068
# Dataset Card for "life-on-earth" ## Dataset Description - **Homepage-Videos:** [https://archive.org/download/WildlifeDocumentaries]() - **Homepage-Dataset:** [https://huggingface.co/datasets/mikehemberger/darcai-life-on-earth]() - **Repository:** [https://github.com/mikehemberger/darc-ai]() - **Point of Contact:** [[email protected]]() ### Dataset Summary The David Attenborough Research Consortium (DARC) loves David Attenborough (DA). And therefore we aim to enrich his fantastic work using modern deep learning, generative artificial intelligence (AI) methods and most recent assistants like ChatGPT. Those results, together with extracted and time stamped image frames ("frame_00000_hh-mm-ss.msmsms.jpg", ...) from videos constitutes the darcai-life-on-earth dataset. As a first enrichment, we include text captions generated by the huggingface "Salesforce/blip2-opt-2.7b" model for >84K image frames as a ready-to-go dataset. Furthermore our [https://huggingface.co/datasets/mikehemberger/darcai-life-on-earth](github-repo-page) includes ViT image embeddings (dim=768) and caption-txt embeddings (using openAIs "text-embedding-ada-002" model, dim=1536) for all >84K images. ### Languages Native english mostly. Some german. Hopefully many more soon. ## Dataset Structure ![life-on-earth-tsne](https://github.com/mikehemberger/darc-ai/blob/main/readme-examples/fig3.png) ### Data Instances { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=622x360>, 'label': 0, 'file_name': 'frame_00000_00-00-00.000.jpg', 'show_name': 'life-on-earth-s01-e01-the-infinite-varirty', 'relative_path': 'images/life-on-earth/life-on-earth-s01-e01-the-infinite-varirty', 'caption': 'a black background with a white clock on it' } ### Data Fields - image: a PIL image frame extracted from video (decode=True) - label: One of [0-12] according to 13 episodes - file_name: file name of the PIL image - show_name: name of the show and episode from which the images were extracted - relative_path: where to find the images - caption: text caption for the image generated by huggingface transformers blip2 model ("Salesforce/blip2-opt-2.7b") - ## Dataset Creation
mikehemberger/darcai-life-on-earth
[ "task_categories:zero-shot-classification", "task_categories:translation", "task_categories:summarization", "task_categories:conversational", "task_categories:feature-extraction", "task_categories:sentence-similarity", "size_categories:100K<n<1M", "language:en", "language:de", "license:mit", "biology", "climate", "code", "region:us" ]
2023-11-04T15:21:45+00:00
{"language": ["en", "de"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["zero-shot-classification", "translation", "summarization", "conversational", "feature-extraction", "sentence-similarity"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "life-on-earth-s01-e01-the-infinite-varirty", "1": "life-on-earth-s01-e02-building-bodies", "2": "life-on-earth-s01-e03-the-first-forests", "3": "life-on-earth-s01-e04-the-swarming-hordes", "4": "life-on-earth-s01-e05-conquest-of-the-waters", "5": "life-on-earth-s01-e06-invasion-of-the-land", "6": "life-on-earth-s01-e07-victors-of-the-dry-land", "7": "life-on-earth-s01-e08-lords-of-the-air", "8": "life-on-earth-s01-e09-the-rise-of-the-mammals", "9": "life-on-earth-s01-e10-themes-and-variations", "10": "life-on-earth-s01-e11-the-hunters-and-the-hunted", "11": "life-on-earth-s01-e12-life-in-the-trees", "12": "life-on-earth-s01-e13-the-compulsive-communicators"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "show_name", "dtype": "string"}, {"name": "relative_path", "dtype": "string"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1072256363, "num_examples": 84323}], "download_size": 1059335669, "dataset_size": 1072256363}, "tags": ["biology", "climate", "code"]}
2023-12-16T18:32:28+00:00
[]
[ "en", "de" ]
TAGS #task_categories-zero-shot-classification #task_categories-translation #task_categories-summarization #task_categories-conversational #task_categories-feature-extraction #task_categories-sentence-similarity #size_categories-100K<n<1M #language-English #language-German #license-mit #biology #climate #code #region-us
# Dataset Card for "life-on-earth" ## Dataset Description - Homepage-Videos: [URL - Homepage-Dataset: [URL - Repository: [URL - Point of Contact: [mikehemberger@URL]() ### Dataset Summary The David Attenborough Research Consortium (DARC) loves David Attenborough (DA). And therefore we aim to enrich his fantastic work using modern deep learning, generative artificial intelligence (AI) methods and most recent assistants like ChatGPT. Those results, together with extracted and time stamped image frames ("frame_00000_hh-URL", ...) from videos constitutes the darcai-life-on-earth dataset. As a first enrichment, we include text captions generated by the huggingface "Salesforce/blip2-opt-2.7b" model for >84K image frames as a ready-to-go dataset. Furthermore our URL includes ViT image embeddings (dim=768) and caption-txt embeddings (using openAIs "text-embedding-ada-002" model, dim=1536) for all >84K images. ### Languages Native english mostly. Some german. Hopefully many more soon. ## Dataset Structure !life-on-earth-tsne ### Data Instances { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=622x360>, 'label': 0, 'file_name': 'frame_00000_00-URL', 'show_name': 'life-on-earth-s01-e01-the-infinite-varirty', 'relative_path': 'images/life-on-earth/life-on-earth-s01-e01-the-infinite-varirty', 'caption': 'a black background with a white clock on it' } ### Data Fields - image: a PIL image frame extracted from video (decode=True) - label: One of [0-12] according to 13 episodes - file_name: file name of the PIL image - show_name: name of the show and episode from which the images were extracted - relative_path: where to find the images - caption: text caption for the image generated by huggingface transformers blip2 model ("Salesforce/blip2-opt-2.7b") - ## Dataset Creation
[ "# Dataset Card for \"life-on-earth\"", "## Dataset Description\n\n- Homepage-Videos: [URL\n- Homepage-Dataset: [URL\n- Repository: [URL\n- Point of Contact: [mikehemberger@URL]()", "### Dataset Summary\nThe David Attenborough Research Consortium (DARC) loves David Attenborough (DA). And therefore we aim to enrich his fantastic work using modern deep learning, generative artificial intelligence (AI) methods and most recent assistants like ChatGPT. Those results, together with extracted and time stamped image frames (\"frame_00000_hh-URL\", ...) from videos constitutes the darcai-life-on-earth dataset.\nAs a first enrichment, we include text captions generated by the huggingface \"Salesforce/blip2-opt-2.7b\" model for >84K image frames as a ready-to-go dataset.\nFurthermore our URL includes ViT image embeddings (dim=768) and caption-txt embeddings (using openAIs \"text-embedding-ada-002\" model, dim=1536) for all >84K images.", "### Languages\nNative english mostly. Some german. Hopefully many more soon.", "## Dataset Structure\n\n!life-on-earth-tsne", "### Data Instances\n\n{\n 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=622x360>,\n 'label': 0,\n 'file_name': 'frame_00000_00-URL',\n 'show_name': 'life-on-earth-s01-e01-the-infinite-varirty',\n 'relative_path': 'images/life-on-earth/life-on-earth-s01-e01-the-infinite-varirty',\n 'caption': 'a black background with a white clock on it'\n }", "### Data Fields\n- image: a PIL image frame extracted from video (decode=True)\n- label: One of [0-12] according to 13 episodes\n- file_name: file name of the PIL image\n- show_name: name of the show and episode from which the images were extracted\n- relative_path: where to find the images\n- caption: text caption for the image generated by huggingface transformers blip2 model (\"Salesforce/blip2-opt-2.7b\")\n\n- ## Dataset Creation" ]
[ "TAGS\n#task_categories-zero-shot-classification #task_categories-translation #task_categories-summarization #task_categories-conversational #task_categories-feature-extraction #task_categories-sentence-similarity #size_categories-100K<n<1M #language-English #language-German #license-mit #biology #climate #code #region-us \n", "# Dataset Card for \"life-on-earth\"", "## Dataset Description\n\n- Homepage-Videos: [URL\n- Homepage-Dataset: [URL\n- Repository: [URL\n- Point of Contact: [mikehemberger@URL]()", "### Dataset Summary\nThe David Attenborough Research Consortium (DARC) loves David Attenborough (DA). And therefore we aim to enrich his fantastic work using modern deep learning, generative artificial intelligence (AI) methods and most recent assistants like ChatGPT. Those results, together with extracted and time stamped image frames (\"frame_00000_hh-URL\", ...) from videos constitutes the darcai-life-on-earth dataset.\nAs a first enrichment, we include text captions generated by the huggingface \"Salesforce/blip2-opt-2.7b\" model for >84K image frames as a ready-to-go dataset.\nFurthermore our URL includes ViT image embeddings (dim=768) and caption-txt embeddings (using openAIs \"text-embedding-ada-002\" model, dim=1536) for all >84K images.", "### Languages\nNative english mostly. Some german. Hopefully many more soon.", "## Dataset Structure\n\n!life-on-earth-tsne", "### Data Instances\n\n{\n 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=622x360>,\n 'label': 0,\n 'file_name': 'frame_00000_00-URL',\n 'show_name': 'life-on-earth-s01-e01-the-infinite-varirty',\n 'relative_path': 'images/life-on-earth/life-on-earth-s01-e01-the-infinite-varirty',\n 'caption': 'a black background with a white clock on it'\n }", "### Data Fields\n- image: a PIL image frame extracted from video (decode=True)\n- label: One of [0-12] according to 13 episodes\n- file_name: file name of the PIL image\n- show_name: name of the show and episode from which the images were extracted\n- relative_path: where to find the images\n- caption: text caption for the image generated by huggingface transformers blip2 model (\"Salesforce/blip2-opt-2.7b\")\n\n- ## Dataset Creation" ]
[ 107, 13, 42, 213, 17, 16, 152, 119 ]
[ "passage: TAGS\n#task_categories-zero-shot-classification #task_categories-translation #task_categories-summarization #task_categories-conversational #task_categories-feature-extraction #task_categories-sentence-similarity #size_categories-100K<n<1M #language-English #language-German #license-mit #biology #climate #code #region-us \n# Dataset Card for \"life-on-earth\"## Dataset Description\n\n- Homepage-Videos: [URL\n- Homepage-Dataset: [URL\n- Repository: [URL\n- Point of Contact: [mikehemberger@URL]()### Dataset Summary\nThe David Attenborough Research Consortium (DARC) loves David Attenborough (DA). And therefore we aim to enrich his fantastic work using modern deep learning, generative artificial intelligence (AI) methods and most recent assistants like ChatGPT. Those results, together with extracted and time stamped image frames (\"frame_00000_hh-URL\", ...) from videos constitutes the darcai-life-on-earth dataset.\nAs a first enrichment, we include text captions generated by the huggingface \"Salesforce/blip2-opt-2.7b\" model for >84K image frames as a ready-to-go dataset.\nFurthermore our URL includes ViT image embeddings (dim=768) and caption-txt embeddings (using openAIs \"text-embedding-ada-002\" model, dim=1536) for all >84K images.### Languages\nNative english mostly. Some german. Hopefully many more soon.## Dataset Structure\n\n!life-on-earth-tsne" ]
d919aca6f974a5a032e1c72f92971d5c62045f9e
# Dataset Card for "the-private-life-of-plants" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mikehemberger/the-private-life-of-plants
[ "region:us" ]
2023-11-04T15:22:51+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "the-private-life-of-plants-s01-e01-travelling", "1": "the-private-life-of-plants-s01-e02-growing", "2": "the-private-life-of-plants-s01-e03-flowering", "3": "the-private-life-of-plants-s01-e04-the-social-struggle", "4": "the-private-life-of-plants-s01-e05-living-together", "5": "the-private-life-of-plants-s01-e06-surviving"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "show_name", "dtype": "string"}, {"name": "relative_path", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 446942104.0, "num_examples": 35232}], "download_size": 442474213, "dataset_size": 446942104.0}}
2023-11-04T15:23:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "the-private-life-of-plants" More Information needed
[ "# Dataset Card for \"the-private-life-of-plants\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"the-private-life-of-plants\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"the-private-life-of-plants\"\n\nMore Information needed" ]
857d644a4365e0c82006c71e21d255f186b8a6b4
# Dataset Card for "planet-earth" TODO: upload blip2 captions [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mikehemberger/planet-earth
[ "region:us" ]
2023-11-04T15:23:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "planet-earth-s01-e01-from-pole-to-pole", "1": "planet-earth-s01-e02-mountains", "2": "planet-earth-s01-e03-freshwater", "3": "planet-earth-s01-e04-caves", "4": "planet-earth-s01-e05-deserts", "5": "planet-earth-s01-e06-ice-worlds", "6": "planet-earth-s01-e07-great-plains", "7": "planet-earth-s01-e08-jungles", "8": "planet-earth-s01-e09-shallow-seas", "9": "planet-earth-s01-e10-seasonal-forests", "10": "planet-earth-s01-e11-ocean-deep"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "show_name", "dtype": "string"}, {"name": "relative_path", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 976527400.0, "num_examples": 77296}], "download_size": 968089912, "dataset_size": 976527400.0}}
2023-12-15T20:14:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "planet-earth" TODO: upload blip2 captions More Information needed
[ "# Dataset Card for \"planet-earth\"\n\nTODO: upload blip2 captions\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"planet-earth\"\n\nTODO: upload blip2 captions\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"planet-earth\"\n\nTODO: upload blip2 captions\n\nMore Information needed" ]
7fbd83a3988ad71c51985850e837e50ade0294d2
# Dataset Card for "mental_health_chatbot_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
WinchoQi/mental_health_chatbot_dataset
[ "region:us" ]
2023-11-04T15:27:24+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 189421, "num_examples": 172}], "download_size": 0, "dataset_size": 189421}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T14:57:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mental_health_chatbot_dataset" More Information needed
[ "# Dataset Card for \"mental_health_chatbot_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mental_health_chatbot_dataset\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mental_health_chatbot_dataset\"\n\nMore Information needed" ]
8d94cb336db309faa23b4dc7d8cb667c0c24de86
# Dataset Card for "ffmperative-sample" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
remyxai/ffmperative-sample
[ "region:us" ]
2023-11-04T15:32:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 732772, "num_examples": 1889}], "download_size": 199794, "dataset_size": 732772}}
2023-11-04T15:32:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ffmperative-sample" More Information needed
[ "# Dataset Card for \"ffmperative-sample\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ffmperative-sample\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ffmperative-sample\"\n\nMore Information needed" ]
9feec5eb571c9112a58d5c862e8869f325549331
# Dataset Card for "t0-1.6M-flat" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sordonia/t0-1.6M-flat
[ "region:us" ]
2023-11-04T15:44:26+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "target", "dtype": "string"}, {"name": "task_name", "dtype": "string"}, {"name": "template_type", "dtype": "string"}, {"name": "task_source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1996543336, "num_examples": 1600000}], "download_size": 1133389101, "dataset_size": 1996543336}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T15:53:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "t0-1.6M-flat" More Information needed
[ "# Dataset Card for \"t0-1.6M-flat\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"t0-1.6M-flat\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"t0-1.6M-flat\"\n\nMore Information needed" ]
aa3d716ac3142d039c15550d2ebda14c3ffcdcb3
# Dataset Card for "annotated_hands_good_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ksukrit/annotated_hands_good_dataset
[ "region:us" ]
2023-11-04T16:09:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1354244079.1, "num_examples": 1150}], "download_size": 1318885773, "dataset_size": 1354244079.1}}
2023-11-04T16:10:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "annotated_hands_good_dataset" More Information needed
[ "# Dataset Card for \"annotated_hands_good_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"annotated_hands_good_dataset\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"annotated_hands_good_dataset\"\n\nMore Information needed" ]
dcf15887531d993538cd1facbfa81819cbe1efee
# Dataset Card for "bbc_news_ptbr_summary" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celsowm/bbc_news_ptbr_summary
[ "region:us" ]
2023-11-04T16:23:50+00:00
{"dataset_info": {"features": [{"name": "categoria", "dtype": "string"}, {"name": "resumo", "dtype": "string"}, {"name": "titulo", "dtype": "string"}, {"name": "texto", "dtype": "string"}, {"name": "data_hora", "dtype": "string"}, {"name": "link", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1987289, "num_examples": 494}], "download_size": 1129480, "dataset_size": 1987289}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T16:23:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bbc_news_ptbr_summary" More Information needed
[ "# Dataset Card for \"bbc_news_ptbr_summary\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bbc_news_ptbr_summary\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bbc_news_ptbr_summary\"\n\nMore Information needed" ]
4289d284712d97fcfb4d2bf2fa054abe8e8ce91d
# Dataset Card for "flan-debug-flat" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sordonia/flan-debug-flat
[ "region:us" ]
2023-11-04T16:53:47+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "target", "dtype": "string"}, {"name": "task_name", "dtype": "string"}, {"name": "task_source", "dtype": "string"}, {"name": "template_type", "dtype": "string"}, {"name": "template_idx", "dtype": "int64"}, {"name": "split", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28551282, "num_examples": 18200}], "download_size": 12635228, "dataset_size": 28551282}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-10T22:40:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "flan-debug-flat" More Information needed
[ "# Dataset Card for \"flan-debug-flat\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"flan-debug-flat\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"flan-debug-flat\"\n\nMore Information needed" ]
d6fc2b923c489e6f21b8583aabd509fcae50ae9b
# Dataset Card for "org_acad" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alexemanuel27/org_acad
[ "region:us" ]
2023-11-04T17:05:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "title", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 628748, "num_examples": 100}], "download_size": 33141, "dataset_size": 628748}}
2023-11-04T17:11:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "org_acad" More Information needed
[ "# Dataset Card for \"org_acad\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"org_acad\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"org_acad\"\n\nMore Information needed" ]
23b752523fb749b2ca3c3408c509a14d2438f85e
# Dataset Card for "ola_polyglot_5.8B_t1_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eunbinni/ola_polyglot_5.8B_t1_data
[ "region:us" ]
2023-11-04T17:23:26+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 691281335, "num_examples": 580812}], "download_size": 399933748, "dataset_size": 691281335}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T17:24:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ola_polyglot_5.8B_t1_data" More Information needed
[ "# Dataset Card for \"ola_polyglot_5.8B_t1_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ola_polyglot_5.8B_t1_data\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ola_polyglot_5.8B_t1_data\"\n\nMore Information needed" ]
cb73bc57a667a572b501896c87d9e3a9af965d25
# Dataset Card for "classification_claims" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tingchih/classification_claims
[ "region:us" ]
2023-11-04T17:29:33+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5314830278, "num_examples": 570692}, {"name": "test", "num_bytes": 2277898100, "num_examples": 244583}], "download_size": 4391700574, "dataset_size": 7592728378}}
2023-11-04T17:33:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "classification_claims" More Information needed
[ "# Dataset Card for \"classification_claims\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"classification_claims\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"classification_claims\"\n\nMore Information needed" ]
8dda13b8d4b5df9f3ad4c972f342040a8e15e563
# Dataset Card for "ucla_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anyspeech/ucla_test
[ "region:us" ]
2023-11-04T17:34:46+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": "string"}, {"name": "phones", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 726465945, "num_examples": 5444}], "download_size": 558156867, "dataset_size": 726465945}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T17:35:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ucla_test" More Information needed
[ "# Dataset Card for \"ucla_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ucla_test\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ucla_test\"\n\nMore Information needed" ]
f6d827fbf96568b08157d92df4dbbe192348365d
# Dataset Card for "titanic1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Bilal777888/titanic1
[ "region:us" ]
2023-11-04T17:41:48+00:00
{"dataset_info": {"features": [{"name": "Passengerid", "dtype": "int64"}, {"name": "Age", "dtype": "float64"}, {"name": "Fare", "dtype": "float64"}, {"name": "Sex", "dtype": "int64"}, {"name": "sibsp", "dtype": "int64"}, {"name": "zero", "dtype": "int64"}, {"name": "zero.1", "dtype": "int64"}, {"name": "zero.2", "dtype": "int64"}, {"name": "zero.3", "dtype": "int64"}, {"name": "zero.4", "dtype": "int64"}, {"name": "zero.5", "dtype": "int64"}, {"name": "zero.6", "dtype": "int64"}, {"name": "Parch", "dtype": "int64"}, {"name": "zero.7", "dtype": "int64"}, {"name": "zero.8", "dtype": "int64"}, {"name": "zero.9", "dtype": "int64"}, {"name": "zero.10", "dtype": "int64"}, {"name": "zero.11", "dtype": "int64"}, {"name": "zero.12", "dtype": "int64"}, {"name": "zero.13", "dtype": "int64"}, {"name": "zero.14", "dtype": "int64"}, {"name": "Pclass", "dtype": "int64"}, {"name": "zero.15", "dtype": "int64"}, {"name": "zero.16", "dtype": "int64"}, {"name": "Embarked", "dtype": "float64"}, {"name": "zero.17", "dtype": "int64"}, {"name": "zero.18", "dtype": "int64"}, {"name": "2urvived", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 293380, "num_examples": 1309}], "download_size": 37364, "dataset_size": 293380}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T17:45:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "titanic1" More Information needed
[ "# Dataset Card for \"titanic1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"titanic1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"titanic1\"\n\nMore Information needed" ]
9f17f0ebb2d0ac908e9a7616fedd3c1f21fec7a1
# Dataset Card for "org-acad-train-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alexemanuel27/org-acad-train-test
[ "region:us" ]
2023-11-04T18:11:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "title", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 435339, "num_examples": 69}, {"name": "validation", "num_bytes": 193409, "num_examples": 31}], "download_size": 51330, "dataset_size": 628748}}
2023-11-04T18:21:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "org-acad-train-test" More Information needed
[ "# Dataset Card for \"org-acad-train-test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"org-acad-train-test\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"org-acad-train-test\"\n\nMore Information needed" ]
805ccfff2decf4075e422abccbe0c08f5882f06e
# 한국어 Ultrafeedback 데이터셋셋 본 데이터는 Synatra-Translation 모델을 사용하여 번역한 데이터입니다. 수정사항 있으면 PR 부탁드립니다.
maywell/ko_Ultrafeedback_binarized_10k
[ "region:us" ]
2023-11-04T18:41:45+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 36618993, "num_examples": 10000}], "download_size": 17837945, "dataset_size": 36618993}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T18:45:19+00:00
[]
[]
TAGS #region-us
# Korean Ultrafeedback Dataset This data was translated using the Synatra-Translation model. If you have any corrections, please open a PR.
[ "# 한국어 Ultrafeedback 데이터셋셋\n\n본 데이터는 Synatra-Translation 모델을 사용하여 번역한 데이터입니다.\n\n수정사항 있으면 PR 부탁드립니다." ]
[ "TAGS\n#region-us \n", "# 한국어 Ultrafeedback 데이터셋셋\n\n본 데이터는 Synatra-Translation 모델을 사용하여 번역한 데이터입니다.\n\n수정사항 있으면 PR 부탁드립니다." ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# 한국어 Ultrafeedback 데이터셋셋\n\n본 데이터는 Synatra-Translation 모델을 사용하여 번역한 데이터입니다.\n\n수정사항 있으면 PR 부탁드립니다." ]
43104873ebd2fce703c797a037b97f0b351c2ed3
# maywell/ko_Ultrafeedback_binarized 본 데이터는 Synatra-7B-Translation 모델을 통해 Ultrafeedback_binarized를 번역하고 정제한 데이터셋입니다. 해당 데이터를 직접적으로 상업적으로 사용하는 것은 허용되지 않으며, 데이터를 이용하여 훈련된 모델에 대한 상업적 사용은 허용됩니다. 아직 완벽히 정제되지는 않았으며, 오류나 수정사항에 대해서는 PR 부탁드립니다.
maywell/ko_Ultrafeedback_binarized
[ "region:us" ]
2023-11-04T18:41:52+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 226278590, "num_examples": 61966}], "download_size": 110040382, "dataset_size": 226278590}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-09T03:25:29+00:00
[]
[]
TAGS #region-us
# maywell/ko_Ultrafeedback_binarized This dataset was translated from Ultrafeedback_binarized and refined using the Synatra-7B-Translation model. Direct commercial use of this data is not permitted; commercial use of models trained on the data is permitted. The data has not yet been fully cleaned, so please open a PR for any errors or corrections.
[ "# maywell/ko_Ultrafeedback_binarized\n\n본 데이터는 Synatra-7B-Translation 모델을 통해 Ultrafeedback_binarized를 번역하고 정제한 데이터셋입니다.\n\n해당 데이터를 직접적으로 상업적으로 사용하는 것은 허용되지 않으며, 데이터를 이용하여 훈련된 모델에 대한 상업적 사용은 허용됩니다.\n\n아직 완벽히 정제되지는 않았으며, 오류나 수정사항에 대해서는 PR 부탁드립니다." ]
[ "TAGS\n#region-us \n", "# maywell/ko_Ultrafeedback_binarized\n\n본 데이터는 Synatra-7B-Translation 모델을 통해 Ultrafeedback_binarized를 번역하고 정제한 데이터셋입니다.\n\n해당 데이터를 직접적으로 상업적으로 사용하는 것은 허용되지 않으며, 데이터를 이용하여 훈련된 모델에 대한 상업적 사용은 허용됩니다.\n\n아직 완벽히 정제되지는 않았으며, 오류나 수정사항에 대해서는 PR 부탁드립니다." ]
[ 6, 90 ]
[ "passage: TAGS\n#region-us \n# maywell/ko_Ultrafeedback_binarized\n\n본 데이터는 Synatra-7B-Translation 모델을 통해 Ultrafeedback_binarized를 번역하고 정제한 데이터셋입니다.\n\n해당 데이터를 직접적으로 상업적으로 사용하는 것은 허용되지 않으며, 데이터를 이용하여 훈련된 모델에 대한 상업적 사용은 허용됩니다.\n\n아직 완벽히 정제되지는 않았으며, 오류나 수정사항에 대해서는 PR 부탁드립니다." ]
5edab31e70847c2ae86fa61c37ab7c30589b9492
# Dataset Card for "turkishReviews-ds-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hdryilmaz/turkishReviews-ds-mini
[ "region:us" ]
2023-11-04T19:05:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "review", "dtype": "string"}, {"name": "review_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1252876.2642514652, "num_examples": 3378}, {"name": "validation", "num_bytes": 139455.7357485349, "num_examples": 376}], "download_size": 896649, "dataset_size": 1392332.0}}
2023-11-04T19:05:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "turkishReviews-ds-mini" More Information needed
[ "# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed" ]
cbf6e87507c904b62f7b2ae06ea203f765986c55
# Dataset Card for "data_hi_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_hi_1
[ "region:us" ]
2023-11-04T19:11:45+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6435127.8, "num_examples": 18000}, {"name": "test", "num_bytes": 715014.2, "num_examples": 2000}], "download_size": 3824291, "dataset_size": 7150142.0}}
2023-11-04T19:22:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_hi_1" More Information needed
[ "# Dataset Card for \"data_hi_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_hi_1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_hi_1\"\n\nMore Information needed" ]
7676d8d92e43dc68bb5ff5f3a43e80c421825622
# Dataset Card for "data_hi_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_hi_2
[ "region:us" ]
2023-11-04T19:22:37+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6356499.3, "num_examples": 18000}, {"name": "test", "num_bytes": 706277.7, "num_examples": 2000}], "download_size": 3784127, "dataset_size": 7062777.0}}
2023-11-04T19:22:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_hi_2" More Information needed
[ "# Dataset Card for \"data_hi_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_hi_2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_hi_2\"\n\nMore Information needed" ]
179863e2b9607164349a6cc84dd0617e885f0ade
# Dataset Card for "data_hi_3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_hi_3
[ "region:us" ]
2023-11-04T19:22:44+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6423499.8, "num_examples": 18000}, {"name": "test", "num_bytes": 713722.2, "num_examples": 2000}], "download_size": 3835153, "dataset_size": 7137222.0}}
2023-11-04T19:22:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_hi_3" More Information needed
[ "# Dataset Card for \"data_hi_3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_hi_3\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_hi_3\"\n\nMore Information needed" ]
5f838300e0ef39ba150cc24774ffd64201e21afe
# Dataset Card for "data_kn_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_kn_1
[ "region:us" ]
2023-11-04T19:22:51+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4017735.9, "num_examples": 18000}, {"name": "test", "num_bytes": 446415.1, "num_examples": 2000}], "download_size": 2392888, "dataset_size": 4464151.0}}
2023-11-04T19:22:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_kn_1" More Information needed
[ "# Dataset Card for \"data_kn_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_kn_1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_kn_1\"\n\nMore Information needed" ]
bd7e9bcdf55d22eccfa8190c5eabfb194fb2af0f
# Dataset Card for "data_kn_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_kn_2
[ "region:us" ]
2023-11-04T19:22:57+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4039020.0, "num_examples": 18000}, {"name": "test", "num_bytes": 448780.0, "num_examples": 2000}], "download_size": 2402763, "dataset_size": 4487800.0}}
2023-11-04T19:23:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_kn_2" More Information needed
[ "# Dataset Card for \"data_kn_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_kn_2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_kn_2\"\n\nMore Information needed" ]
428acc0043331c3b947af4c9ec5b2b3b341a5332
# Dataset Card for "data_kn_3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_kn_3
[ "region:us" ]
2023-11-04T19:23:05+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3976936.2, "num_examples": 18000}, {"name": "test", "num_bytes": 441881.8, "num_examples": 2000}], "download_size": 2363947, "dataset_size": 4418818.0}}
2023-11-04T19:23:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_kn_3" More Information needed
[ "# Dataset Card for \"data_kn_3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_kn_3\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_kn_3\"\n\nMore Information needed" ]
abf9ade16c7657ac1b175880faa05ef9c5461a9e
# Dataset Card for "data_mr_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_mr_1
[ "region:us" ]
2023-11-04T19:23:11+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4557684.6, "num_examples": 18000}, {"name": "test", "num_bytes": 506409.4, "num_examples": 2000}], "download_size": 2671507, "dataset_size": 5064094.0}}
2023-11-04T19:23:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_mr_1" More Information needed
[ "# Dataset Card for \"data_mr_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_mr_1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_mr_1\"\n\nMore Information needed" ]
8cfc579c85a7369985ed5ce6fae5d6b281559b69
# Dataset Card for "data_te_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_te_1
[ "region:us" ]
2023-11-04T19:23:19+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4048999.2, "num_examples": 18000}, {"name": "test", "num_bytes": 449888.8, "num_examples": 2000}], "download_size": 2422001, "dataset_size": 4498888.0}}
2023-11-04T19:23:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_te_1" More Information needed
[ "# Dataset Card for \"data_te_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_te_1\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_te_1\"\n\nMore Information needed" ]
f30ca71fe41e71bc6a2ca092dc92dbcc83f26f1e
# Dataset Card for "data_te_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_te_2
[ "region:us" ]
2023-11-04T19:23:25+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4108551.3, "num_examples": 18000}, {"name": "test", "num_bytes": 456505.7, "num_examples": 2000}], "download_size": 2453776, "dataset_size": 4565057.0}}
2023-11-04T19:23:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_te_2" More Information needed
[ "# Dataset Card for \"data_te_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_te_2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_te_2\"\n\nMore Information needed" ]
d28c5e489228e3bceab73b299f27e09e59a1f1c3
# Dataset Card for "data_te_3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/en_te_3
[ "region:us" ]
2023-11-04T19:23:31+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int64"}, {"name": "src", "dtype": "string"}, {"name": "tgt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4131310.5, "num_examples": 18000}, {"name": "test", "num_bytes": 459034.5, "num_examples": 2000}], "download_size": 2465569, "dataset_size": 4590345.0}}
2023-11-04T19:23:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "data_te_3" More Information needed
[ "# Dataset Card for \"data_te_3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"data_te_3\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"data_te_3\"\n\nMore Information needed" ]
31bdd34e6aa852eef26e243faced5a9f6c24c389
# Dataset Card for "hi-kn" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/hi_kn
[ "region:us" ]
2023-11-04T19:28:12+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "target", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5132190.6, "num_examples": 18000}, {"name": "test", "num_bytes": 570243.4, "num_examples": 2000}], "download_size": 2596240, "dataset_size": 5702434.0}}
2023-11-04T19:28:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hi-kn" More Information needed
[ "# Dataset Card for \"hi-kn\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hi-kn\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hi-kn\"\n\nMore Information needed" ]
8679e2a745af5725ad1361ce6cfb19ecb44f99b3
# Dataset Card for "hi-te" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShrinivasSK/hi_te
[ "region:us" ]
2023-11-04T19:28:29+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "target", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5294133.0, "num_examples": 18000}, {"name": "test", "num_bytes": 588237.0, "num_examples": 2000}], "download_size": 2685371, "dataset_size": 5882370.0}}
2023-11-04T19:28:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hi-te" More Information needed
[ "# Dataset Card for \"hi-te\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hi-te\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hi-te\"\n\nMore Information needed" ]
1470ebf27c9fdf65850ceb841ccd757e4aa79aa5
# MACCROBAT-biomedical-ner This data is the same data from [here](https://figshare.com/articles/dataset/MACCROBAT2018/9764942); the only difference is that it has been converted into the Hugging Face dataset format, so it can be easily loaded and used wherever needed. To convert from the original format to the Hugging Face dataset format, the following steps were followed (**for more detail, look at the `create_dataset.py` file**): * Read the corresponding `*.txt` and `*.ann` files. * Used `pandas` to convert the `*.ann` file into a dataframe. * After converting into a dataframe, did some processing and converted the NER label information into: ```JSON { "text": "ner-text", "label": "ner-label", "start": 10, "end": 20 } ``` * Standard labels are converted into `B-Tag` and `I-Tag`, where `B` stands for the beginning of the tag and `I` stands for inside the tag. * Finally the JSON is created and uploaded here. ## Source Data This ZIP-compressed file contains 200 source documents (in plain text, one sentence per line) and 200 annotation documents (in brat standoff format). Documents are named using PubMed document IDs, e.g. "15939911.txt" contains text from the document "A young man with palpitations and Ebstein's anomaly of the tricuspid valve" by Marcu and Donohue. Text is from PubMed Central full-text documents but has been edited to include only clinical case report details. All annotations were created manually. "MACCROBAT2020" is the second release of this dataset, following "MACCROBAT2018". The consistency and format of annotations have been improved in the newest version. ## Uses Use the snippet below to load the data properly; with some additional processing it can be used to fine-tune a medical NER model. ```Python from datasets import load_dataset # load the data medical_ner_data = load_dataset("singh-aditya/MACCROBAT_biomedical_ner") print(medical_ner_data) ``` ``` DatasetDict({ train: Dataset({ features: ['ner_labels', 'tokens', 'full_text', 'ner_info'], num_rows: 200 }) }) ``` <!-- Address questions around how the dataset is intended to be used. 
--> ## Dataset Structure ``` { 'full_text': "CASE: A 28-year-old previously healthy man presented with a 6-week history of palpitations.\nThe symptoms occurred during rest, 2–3 times per week, lasted up to 30 minutes at a time and were associated with dyspnea.\nExcept for a grade 2/6 holosystolic tricuspid regurgitation murmur (best heard at the left sternal border with inspiratory accentuation), physical examination yielded unremarkable findings.\nAn electrocardiogram (ECG) revealed normal sinus rhythm and a Wolff– Parkinson– White pre-excitation pattern (Fig.1: Top), produced by a right-sided accessory pathway.\nTransthoracic echocardiography demonstrated the presence of Ebstein's anomaly of the tricuspid valve, with apical displacement of the valve and formation of an “atrialized” right ventricle (a functional unit between the right atrium and the inlet [inflow] portion of the right ventricle) (Fig.2).\nThe anterior tricuspid valve leaflet was elongated (Fig.2C, arrow), whereas the septal leaflet was rudimentary (Fig.2C, arrowhead).\nContrast echocardiography using saline revealed a patent foramen ovale with right-to-left shunting and bubbles in the left atrium (Fig.2D).\nThe patient underwent an electrophysiologic study with mapping of the accessory pathway, followed by radiofrequency ablation (interruption of the pathway using the heat generated by electromagnetic waves at the tip of an ablation catheter).\nHis post-ablation ECG showed a prolonged PR interval and an odd “second” QRS complex in leads III, aVF and V2–V4 (Fig.1Bottom), a consequence of abnormal impulse conduction in the “atrialized” right ventricle.\nThe patient reported no recurrence of palpitations at follow-up 6 months after the ablation.\n", 'ner_info': [ { 'text': '28-year-old', 'label': 'AGE', 'start': 8, 'end': 19 }, {'text': 'previously healthy', 'label': 'HISTORY', 'start': 20, 'end': 38}, {'text': 'man', 'label': 'SEX', 'start': 39, 'end': 42}, {'text': 'presented', 'label': 'CLINICAL_EVENT', 'start': 43, 'end': 52}, {'text': '6-week', 'label': 'DURATION', 'start': 60, 'end': 66}, {'text': 'palpitations', 'label': 'SIGN_SYMPTOM', 'start': 78, 'end': 90}, {'text': 'symptoms', 'label': 'COREFERENCE', 'start': 96, 'end': 104}, {'text': 'rest', 'label': 'CLINICAL_EVENT', 'start': 121, 'end': 125}, {'text': '2–3 times per week', 'label': 'FREQUENCY', 'start': 127, 'end': 145}, {'text': 'up to 30 minutes at a time', 'label': 'DETAILED_DESCRIPTION', 'start': 154, 'end': 180}, {'text': 'dyspnea', 'label': 'SIGN_SYMPTOM', 'start': 206, 'end': 213}, {'text': 'grade 2/6', 'label': 'LAB_VALUE', 'start': 228, 'end': 237}, {'text': 'holosystolic', 'label': 'DETAILED_DESCRIPTION', 'start': 238, 'end': 250}, {'text': 'tricuspid', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 251, 'end': 260}, {'text': 'regurgitation murmur', 'label': 'SIGN_SYMPTOM', 'start': 261, 'end': 281}, {'text': 'left sternal border', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 301, 'end': 320}, {'text': 'inspiratory accentuation', 'label': 'DETAILED_DESCRIPTION', 'start': 326, 'end': 350}, {'text': 'physical examination', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 353, 'end': 373}, {'text': 'unremarkable', 'label': 'LAB_VALUE', 'start': 382, 'end': 394}, {'text': 'electrocardiogram', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 408, 'end': 425}, {'text': 'ECG', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 427, 'end': 430}, {'text': 'normal', 'label': 'LAB_VALUE', 'start': 441, 'end': 447}, {'text': 'sinus rhythm', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 448, 
'end': 460}, {'text': 'Wolff– Parkinson– White pre-excitation pattern', 'label': 'SIGN_SYMPTOM', 'start': 467, 'end': 513}, {'text': 'right-sided', 'label': 'DETAILED_DESCRIPTION', 'start': 542, 'end': 553}, {'text': 'accessory pathway', 'label': 'DISEASE_DISORDER', 'start': 554, 'end': 571}, {'text': 'Transthoracic', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 573, 'end': 586}, {'text': 'echocardiography', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 587, 'end': 603}, {'text': "Ebstein's anomaly", 'label': 'DISEASE_DISORDER', 'start': 633, 'end': 650}, {'text': 'tricuspid valve', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 658, 'end': 673}, {'text': 'apical displacement', 'label': 'SIGN_SYMPTOM', 'start': 680, 'end': 699}, {'text': 'valve', 'label': 'COREFERENCE', 'start': 707, 'end': 712}, {'text': 'atrialized', 'label': 'DISEASE_DISORDER', 'start': 734, 'end': 744}, {'text': 'right ventricle', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 746, 'end': 761}, {'text': 'right atrium', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 793, 'end': 805}, {'text': 'inlet', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 814, 'end': 819}, {'text': 'right ventricle', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 844, 'end': 859}, {'text': 'anterior tricuspid valve leaflet', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 874, 'end': 906}, {'text': 'elongated', 'label': 'SIGN_SYMPTOM', 'start': 911, 'end': 920}, {'text': 'septal leaflet', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 950, 'end': 964}, {'text': 'rudimentary', 'label': 'SIGN_SYMPTOM', 'start': 969, 'end': 980}, {'text': 'Contrast', 'label': 'DETAILED_DESCRIPTION', 'start': 1002, 'end': 1010}, {'text': 'echocardiography', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1011, 'end': 1027}, {'text': 'using saline', 'label': 'DETAILED_DESCRIPTION', 'start': 1028, 'end': 1040}, {'text': 'patent foramen ovale', 'label': 'DISEASE_DISORDER', 'start': 1052, 'end': 1072}, {'text': 'right-to-left shunting', 'label': 'SIGN_SYMPTOM', 'start': 1078, 'end': 1100}, {'text': 'bubbles', 'label': 'SIGN_SYMPTOM', 'start': 1105, 'end': 1112}, {'text': 'left atrium', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 1120, 'end': 1131}, {'text': 'electrophysiologic study', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1167, 'end': 1191}, {'text': 'mapping', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1197, 'end': 1204}, {'text': 'accessory pathway', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 1212, 'end': 1229}, {'text': 'radiofrequency', 'label': 'DETAILED_DESCRIPTION', 'start': 1243, 'end': 1257}, {'text': 'ablation', 'label': 'THERAPEUTIC_PROCEDURE', 'start': 1258, 'end': 1266}, {'text': 'ablation catheter', 'label': 'THERAPEUTIC_PROCEDURE', 'start': 1363, 'end': 1380}, {'text': 'ECG', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1401, 'end': 1404}, {'text': 'prolonged', 'label': 'LAB_VALUE', 'start': 1414, 'end': 1423}, {'text': 'PR interval', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1424, 'end': 1435}, {'text': 'odd', 'label': 'LAB_VALUE', 'start': 1443, 'end': 1446}, {'text': '“second”', 'label': 'LAB_VALUE', 'start': 1447, 'end': 1455}, {'text': 'QRS complex', 'label': 'DIAGNOSTIC_PROCEDURE', 'start': 1456, 'end': 1467}, {'text': 'leads III, aVF and V2–V4', 'label': 'DETAILED_DESCRIPTION', 'start': 1471, 'end': 1495}, {'text': 'abnormal impulse conduction', 'label': 'DISEASE_DISORDER', 'start': 1528, 'end': 1555}, {'text': 'atrialized', 'label': 'DISEASE_DISORDER', 'start': 1564, 'end': 1574}, {'text': 'right ventricle', 'label': 'BIOLOGICAL_STRUCTURE', 'start': 1576, 'end': 1591}, {'text': 'palpitations', 
'label': 'SIGN_SYMPTOM', 'start': 1631, 'end': 1643}, {'text': 'follow-up', 'label': 'CLINICAL_EVENT', 'start': 1647, 'end': 1656}, {'text': '6 months after', 'label': 'DATE', 'start': 1657, 'end': 1671}], 'tokens': ['CASE: A ', '28-year-old', ' ', 'previously healthy', ' ', 'man', ' ', 'presented', ' with a ', '6-week', ' history of ', 'palpitations', '.\nThe ', 'symptoms', ' occurred during ', 'rest', ', ', '2–3 times per week', ', lasted ', 'up to 30 minutes at a time', ' and were associated with ', 'dyspnea', '.\nExcept for a ', 'grade 2/6', ' ', 'holosystolic', ' ', 'tricuspid', ' ', 'regurgitation murmur', ' (best heard at the ', 'left sternal border', ' with ', 'inspiratory accentuation', '), ', 'physical examination', ' yielded ', 'unremarkable', ' findings.\nAn ', 'electrocardiogram', ' (', 'ECG', ') revealed ', 'normal', ' ', 'sinus rhythm', ' and a ', 'Wolff– Parkinson– White pre-excitation pattern', ' (Fig.1: Top), produced by a ', 'right-sided', ' ', 'accessory pathway', '.\n', 'Transthoracic', ' ', 'echocardiography', ' demonstrated the presence of ', "Ebstein's anomaly", ' of the ', 'tricuspid valve', ', with ', 'apical displacement', ' of the ', 'valve', ' and formation of an “', 'atrialized', '” ', 'right ventricle', ' (a functional unit between the ', 'right atrium', ' and the ', 'inlet', ' [inflow] portion of the ', 'right ventricle', ') (Fig.2).\nThe ', 'anterior tricuspid valve leaflet', ' was ', 'elongated', ' (Fig.2C, arrow), whereas the ', 'septal leaflet', ' was ', 'rudimentary', ' (Fig.2C, arrowhead).\n', 'Contrast', ' ', 'echocardiography', ' ', 'using saline', ' revealed a ', 'patent foramen ovale', ' with ', 'right-to-left shunting', ' and ', 'bubbles', ' in the ', 'left atrium', ' (Fig.2D).\nThe patient underwent an ', 'electrophysiologic study', ' with ', 'mapping', ' of the ', 'accessory pathway', ', followed by ', 'radiofrequency', ' ', 'ablation', ' (interruption of the pathway using the heat generated by electromagnetic waves at the tip of an ', 'ablation catheter', ').\nHis post-ablation ', 'ECG', ' showed a ', 'prolonged', ' ', 'PR interval', ' and an ', 'odd', ' ', '“second”', ' ', 'QRS complex', ' in ', 'leads III, aVF and V2–V4', ' (Fig.1Bottom), a consequence of ', 'abnormal impulse conduction', ' in the “', 'atrialized', '” ', 'right ventricle', '.\nThe patient reported no recurrence of ', 'palpitations', ' at ', 'follow-up', ' ', '6 months after', ' the ablation.\n'], 'ner_labels': [0, 5, 0, 39, 0, 65, 0, 13, 0, 32, 0, 69, 0, 18, 0, 13, 0, 35, 0, 22, 0, 69, 0, 42, 0, 22, 0, 12, 0, 69, 0, 12, 0, 22, 0, 24, 0, 42, 0, 24, 0, 24, 0, 42, 0, 24, 0, 69, 0, 22, 0, 26, 0, 12, 0, 24, 0, 26, 0, 12, 0, 69, 0, 18, 0, 26, 0, 12, 0, 12, 0, 12, 0, 12, 0, 12, 0, 69, 0, 12, 0, 69, 0, 22, 0, 24, 0, 22, 0, 26, 0, 69, 0, 69, 0, 12, 0, 24, 0, 24, 0, 12, 0, 22, 0, 75, 0, 75, 0, 24, 0, 42, 0, 24, 0, 42, 0, 42, 0, 24, 0, 22, 0, 26, 0, 26, 0, 12, 0, 69, 0, 13, 0, 19, 0]} ``` ## NER-Lables ```Python NER_lables = [ "O", "B-ACTIVITY", "I-ACTIVITY", "I-ADMINISTRATION", "B-ADMINISTRATION", "B-AGE", "I-AGE", "I-AREA", "B-AREA", "B-BIOLOGICAL_ATTRIBUTE", "I-BIOLOGICAL_ATTRIBUTE", "I-BIOLOGICAL_STRUCTURE", "B-BIOLOGICAL_STRUCTURE", "B-CLINICAL_EVENT", "I-CLINICAL_EVENT", "B-COLOR", "I-COLOR", "I-COREFERENCE", "B-COREFERENCE", "B-DATE", "I-DATE", "I-DETAILED_DESCRIPTION", "B-DETAILED_DESCRIPTION", "I-DIAGNOSTIC_PROCEDURE", "B-DIAGNOSTIC_PROCEDURE", "I-DISEASE_DISORDER", "B-DISEASE_DISORDER", "B-DISTANCE", "I-DISTANCE", "B-DOSAGE", "I-DOSAGE", "I-DURATION", "B-DURATION", 
"I-FAMILY_HISTORY", "B-FAMILY_HISTORY", "B-FREQUENCY", "I-FREQUENCY", "I-HEIGHT", "B-HEIGHT", "B-HISTORY", "I-HISTORY", "I-LAB_VALUE", "B-LAB_VALUE", "I-MASS", "B-MASS", "I-MEDICATION", "B-MEDICATION", "I-NONBIOLOGICAL_LOCATION", "B-NONBIOLOGICAL_LOCATION", "I-OCCUPATION", "B-OCCUPATION", "B-OTHER_ENTITY", "I-OTHER_ENTITY", "B-OTHER_EVENT", "I-OTHER_EVENT", "I-OUTCOME", "B-OUTCOME", "I-PERSONAL_BACKGROUND", "B-PERSONAL_BACKGROUND", "B-QUALITATIVE_CONCEPT", "I-QUALITATIVE_CONCEPT", "I-QUANTITATIVE_CONCEPT", "B-QUANTITATIVE_CONCEPT", "B-SEVERITY", "I-SEVERITY", "B-SEX", "I-SEX", "B-SHAPE", "I-SHAPE", "B-SIGN_SYMPTOM", "I-SIGN_SYMPTOM", "B-SUBJECT", "I-SUBJECT", "B-TEXTURE", "I-TEXTURE", "B-THERAPEUTIC_PROCEDURE", "I-THERAPEUTIC_PROCEDURE", "I-TIME", "B-TIME", "B-VOLUME", "I-VOLUME", "I-WEIGHT", "B-WEIGHT", ] ``` **BibTeX:** ```JSON { article= Caufield2020, author = "J. Harry Caufield", title = "{MACCROBAT}", year = "2020", month = "1", url = "https://figshare.com/articles/dataset/MACCROBAT2018/9764942", doi = "10.6084/m9.figshare.9764942.v2" } ```
singh-aditya/MACCROBAT_biomedical_ner
[ "task_categories:token-classification", "size_categories:1M<n<10M", "language:en", "license:mit", "biology", "medical", "region:us" ]
2023-11-04T19:57:50+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1M<n<10M"], "task_categories": ["token-classification"], "tags": ["biology", "medical"], "field": ["data"]}
2023-11-05T02:19:17+00:00
[]
[ "en" ]
TAGS #task_categories-token-classification #size_categories-1M<n<10M #language-English #license-mit #biology #medical #region-us
# MACCROBAT-biomedical-ner This data is the same data from here; the only difference is that it has been converted into the Hugging Face dataset format, so it can be easily loaded and used wherever needed. To convert from the original format to the Hugging Face dataset format, the following steps were followed (for more detail, look at the 'create_dataset.py' file): * Read the corresponding '*.txt' and '*.ann' files. * Used 'pandas' to convert the '*.ann' file into a dataframe. * After converting into a dataframe, did some processing and converted the NER label information into: * Standard labels are converted into 'B-tag' and 'I-tag', where 'B' stands for the beginning of the tag and 'I' stands for inside the tag. * Finally the JSON is created and uploaded here. ## Source Data This ZIP-compressed file contains 200 source documents (in plain text, one sentence per line) and 200 annotation documents (in brat standoff format). Documents are named using PubMed document IDs, e.g. "URL" contains text from the document "A young man with palpitations and Ebstein's anomaly of the tricuspid valve" by Marcu and Donohue. Text is from PubMed Central full-text documents but has been edited to include only clinical case report details. All annotations were created manually. "MACCROBAT2020" is the second release of this dataset, following "MACCROBAT2018". The consistency and format of annotations have been improved in the newest version. ## Uses Use the snippet below to load the data properly; it can be used to fine-tune a medical NER model with some additional processing. ## Dataset Structure ## NER-Labels BibTeX:
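The loading snippet referenced under "Uses" is not preserved in this dump; a plausible reconstruction, assuming the dataset loads directly by its repository id, is:

```python
from datasets import load_dataset

# Assumption: the repository id below matches this card; the "tokens" and
# "ner_labels" columns are the ones shown in the sample record above.
ds = load_dataset("singh-aditya/MACCROBAT_biomedical_ner")
sample = ds["train"][0]
print(sample["tokens"][:4])
print(sample["ner_labels"][:4])
```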
[ "# MACCROBAT-biomedical-ner\nThis data is the same data from here, the only difference is that it has been converted into the Huggingface dataset format. So it can be easily loaded and can be used wherever need.\n\nTo convert from the orginal format to huggingface dataset format, followed the following steps (To know in more detail look at the 'create_dataset.py' file):\n* Read corresponding '*.txt' and '*.ann' file.\n* Used 'pandas' to convert the '*.ann' file into dataframe.\n* After converting into dataframe, did some processing and converted NER label information into:\n \n* Standard labels are converted into 'B-Tag' and 'I-tag', where 'B'- stands for begning of the tag and 'I' - stands for inside the tag.\n* Finally the JSON is created and uploaded here.", "## Source Data\nThis ZIP-compressed file contains 200 source documents (in plain text, on sentence per line) and 200 annotation documents (in brat standoff format). Documents are named using PubMed document IDs, e.g. \"URL\" contains text from the document \"A young man with palpitations and Ebstein's anomaly of the tricuspid valve\" by Marcu and Donohue. Text is from PubMed Central full-text documents but has been edited to include only clinical case report details. All annotations were created manually.\n\n\"MACCROBAT2020\" is the second release of this dataset, following \"MACCROBAT2018\". The consistency and format of annotations has been improved in the newest version.", "## Uses\nUse below snippet to load the data properly and it can be used to finetune medical based NER model with some additional processing.", "## Dataset Structure", "## NER-Lables\n\n\n\n\n\nBibTeX:" ]
[ "TAGS\n#task_categories-token-classification #size_categories-1M<n<10M #language-English #license-mit #biology #medical #region-us \n", "# MACCROBAT-biomedical-ner\nThis data is the same data from here, the only difference is that it has been converted into the Huggingface dataset format. So it can be easily loaded and can be used wherever need.\n\nTo convert from the orginal format to huggingface dataset format, followed the following steps (To know in more detail look at the 'create_dataset.py' file):\n* Read corresponding '*.txt' and '*.ann' file.\n* Used 'pandas' to convert the '*.ann' file into dataframe.\n* After converting into dataframe, did some processing and converted NER label information into:\n \n* Standard labels are converted into 'B-Tag' and 'I-tag', where 'B'- stands for begning of the tag and 'I' - stands for inside the tag.\n* Finally the JSON is created and uploaded here.", "## Source Data\nThis ZIP-compressed file contains 200 source documents (in plain text, on sentence per line) and 200 annotation documents (in brat standoff format). Documents are named using PubMed document IDs, e.g. \"URL\" contains text from the document \"A young man with palpitations and Ebstein's anomaly of the tricuspid valve\" by Marcu and Donohue. Text is from PubMed Central full-text documents but has been edited to include only clinical case report details. All annotations were created manually.\n\n\"MACCROBAT2020\" is the second release of this dataset, following \"MACCROBAT2018\". The consistency and format of annotations has been improved in the newest version.", "## Uses\nUse below snippet to load the data properly and it can be used to finetune medical based NER model with some additional processing.", "## Dataset Structure", "## NER-Lables\n\n\n\n\n\nBibTeX:" ]
[ 45, 209, 172, 32, 6, 11 ]
[ "passage: TAGS\n#task_categories-token-classification #size_categories-1M<n<10M #language-English #license-mit #biology #medical #region-us \n# MACCROBAT-biomedical-ner\nThis data is the same data from here, the only difference is that it has been converted into the Huggingface dataset format. So it can be easily loaded and can be used wherever need.\n\nTo convert from the orginal format to huggingface dataset format, followed the following steps (To know in more detail look at the 'create_dataset.py' file):\n* Read corresponding '*.txt' and '*.ann' file.\n* Used 'pandas' to convert the '*.ann' file into dataframe.\n* After converting into dataframe, did some processing and converted NER label information into:\n \n* Standard labels are converted into 'B-Tag' and 'I-tag', where 'B'- stands for begning of the tag and 'I' - stands for inside the tag.\n* Finally the JSON is created and uploaded here.## Source Data\nThis ZIP-compressed file contains 200 source documents (in plain text, on sentence per line) and 200 annotation documents (in brat standoff format). Documents are named using PubMed document IDs, e.g. \"URL\" contains text from the document \"A young man with palpitations and Ebstein's anomaly of the tricuspid valve\" by Marcu and Donohue. Text is from PubMed Central full-text documents but has been edited to include only clinical case report details. All annotations were created manually.\n\n\"MACCROBAT2020\" is the second release of this dataset, following \"MACCROBAT2018\". The consistency and format of annotations has been improved in the newest version.## Uses\nUse below snippet to load the data properly and it can be used to finetune medical based NER model with some additional processing.## Dataset Structure## NER-Lables\n\n\n\n\n\nBibTeX:" ]
47b7571d3c1a521d59d4021541220f14e69b4215
# Dataset Card for "my_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
san457/my_dataset
[ "region:us" ]
2023-11-04T20:02:56+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}], "splits": [{"name": "train", "num_bytes": 79302267.0, "num_examples": 3}], "download_size": 77773397, "dataset_size": 79302267.0}}
2023-11-04T20:06:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "my_dataset" More Information needed
[ "# Dataset Card for \"my_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"my_dataset\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"my_dataset\"\n\nMore Information needed" ]
768dd8b4d5afd6efaef92d6f1c836fe04b1a47b4
# Dataset Card for "eva-pix2pix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ktrinh38/eva-pix2pix
[ "region:us" ]
2023-11-04T20:33:10+00:00
{"dataset_info": {"features": [{"name": "input_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "edited_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 712910099.55, "num_examples": 4291}], "download_size": 337563830, "dataset_size": 712910099.55}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T20:33:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eva-pix2pix" More Information needed
[ "# Dataset Card for \"eva-pix2pix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eva-pix2pix\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eva-pix2pix\"\n\nMore Information needed" ]
24f0c4c69687638e6556ecb54e8e0bcb695edba5
This dataset is for CMU MCDS capstone "Multimodal Question Answering" project. ### Train There are two datasets for router training. **router_train.csv** contains 49009 samples from 5 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), and OKVQA(9009). **router_train_small.csv** contains 10000 samples from 5 datasets CLEVR(2000), VQA-CP v2(2000), TallyQA(2000), GQA(2000), and OKVQA(2000). The small version could be used if 10000 samples are enough to train the router model. ### Test There are two datasets for final evaluation. **mqa_test_large.csv** contains 41082 samples from 6 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), OKVQA(602), and A-OKVQA(480). **mqa_test_balanced.csv** contains 5000 samples from 6 datasets CLEVR(1000), VQA-CP v2(1000), TallyQA(1000), GQA(1000), OKVQA(602), and A-OKVQA(398). The images in the test sets didn't appear in any thrust's training set. ### Annotation Details * image_path: name of the image file in the corresponding folder. (train_images.zip, train_images_small.zip, test_images_large.zip, test_images_balanced.zip) * question: question string * answer: answer string, may contain several words * source_image_id: source of the image * sample_dataset: source of the question * sample_question_id: question_id in the original dataset, could be None The last three attributes are only for tracking and evaluation by the group.
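As a rough illustration of how these files fit together (column and file names are taken from the annotation details above; the extracted image folder path is an assumption):

```python
import pandas as pd
from PIL import Image

# Columns follow the annotation details above.
df = pd.read_csv("router_train.csv")
row = df.iloc[0]
print(row["question"], "->", row["answer"], "| source:", row["sample_dataset"])

# Assumption: train_images.zip has been extracted to ./train_images/
img = Image.open(f"train_images/{row['image_path']}")
```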
yujiaw2/capstoneMQA
[ "region:us" ]
2023-11-04T20:33:19+00:00
{}
2023-11-04T23:05:40+00:00
[]
[]
TAGS #region-us
This dataset is for CMU MCDS capstone "Multimodal Question Answering" project. ### Train There are two datasets for router training. router_train.csv contains 49009 samples from 5 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), and OKVQA(9009). router_train_small.csv contains 10000 samples from 5 datasets CLEVR(2000), VQA-CP v2(2000), TallyQA(2000), GQA(2000), and OKVQA(2000). The small version could be used if 10000 samples are enough to train the router model. ### Test There are two datasets for final evaluation. mqa_test_large.csv contains 41082 samples from 6 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), OKVQA(602), and A-OKVQA(480). mqa_test_balanced.csv contains 5000 samples from 6 datasets CLEVR(1000), VQA-CP v2(1000), TallyQA(1000), GQA(1000), OKVQA(602), and A-OKVQA(398). The images in the test sets didn't appear in any thrust's training set. ### Annotation Details * image_path: name of the image file in the corresponding folder. (train_images.zip, train_images_small.zip, test_images_large.zip, test_images_balanced.zip) * question: question string * answer: answer string, may contain several words * source_image_id: source of the image * sample_dataset: source of the question * sample_question_id: question_id in the original dataset, could be None The last three attributes are only for tracking and evaluation by the group.
[ "### Train\nThere are two datasets for router training. router_train.csv contains 49009 samples from 5 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), and OKVQA(9009). router_train_small.csv contains 10000 samples from 5 datasets CLEVR(2000), VQA-CP v2(2000), TallyQA(2000), GQA(2000), and OKVQA(2000). \n\nThe small version could be used if 10000 samples are enough to train the router model.", "### Test\nThere are two datasets for final evaluation. mqa_test_large.csv contains 41082 samples from 6 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), OKVQA(602), and A-OKVQA(480). mqa_test_balanced.csv contains 5000 samples from 6 datasets CLEVR(1000), VQA-CP v2(1000), TallyQA(1000), GQA(1000), OKVQA(602), and A-OKVQA(398).\n\nThe images in the test sets didn't appear in any thrust's training set.", "### Annotation Details\n* image_path: name of the image file in corresponding folder. (train_images.zip, train_images_small.zip, test_images_large.zip, test_images_balanced.zip)\n* question: question string\n* answer: answer string, may containing several words\n* source_image_id: source of the image\n* sample_dataset: source of the question\n* sample_question_id: question_id in the original dataset, could be None\n\nThe last three attributes are only for tracking and evaluation by group." ]
[ "TAGS\n#region-us \n", "### Train\nThere are two datasets for router training. router_train.csv contains 49009 samples from 5 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), and OKVQA(9009). router_train_small.csv contains 10000 samples from 5 datasets CLEVR(2000), VQA-CP v2(2000), TallyQA(2000), GQA(2000), and OKVQA(2000). \n\nThe small version could be used if 10000 samples are enough to train the router model.", "### Test\nThere are two datasets for final evaluation. mqa_test_large.csv contains 41082 samples from 6 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), OKVQA(602), and A-OKVQA(480). mqa_test_balanced.csv contains 5000 samples from 6 datasets CLEVR(1000), VQA-CP v2(1000), TallyQA(1000), GQA(1000), OKVQA(602), and A-OKVQA(398).\n\nThe images in the test sets didn't appear in any thrust's training set.", "### Annotation Details\n* image_path: name of the image file in corresponding folder. (train_images.zip, train_images_small.zip, test_images_large.zip, test_images_balanced.zip)\n* question: question string\n* answer: answer string, may containing several words\n* source_image_id: source of the image\n* sample_dataset: source of the question\n* sample_question_id: question_id in the original dataset, could be None\n\nThe last three attributes are only for tracking and evaluation by group." ]
[ 6, 136, 156, 132 ]
[ "passage: TAGS\n#region-us \n### Train\nThere are two datasets for router training. router_train.csv contains 49009 samples from 5 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), and OKVQA(9009). router_train_small.csv contains 10000 samples from 5 datasets CLEVR(2000), VQA-CP v2(2000), TallyQA(2000), GQA(2000), and OKVQA(2000). \n\nThe small version could be used if 10000 samples are enough to train the router model.### Test\nThere are two datasets for final evaluation. mqa_test_large.csv contains 41082 samples from 6 datasets CLEVR(10000), VQA-CP v2(10000), TallyQA(10000), GQA(10000), OKVQA(602), and A-OKVQA(480). mqa_test_balanced.csv contains 5000 samples from 6 datasets CLEVR(1000), VQA-CP v2(1000), TallyQA(1000), GQA(1000), OKVQA(602), and A-OKVQA(398).\n\nThe images in the test sets didn't appear in any thrust's training set.### Annotation Details\n* image_path: name of the image file in corresponding folder. (train_images.zip, train_images_small.zip, test_images_large.zip, test_images_balanced.zip)\n* question: question string\n* answer: answer string, may containing several words\n* source_image_id: source of the image\n* sample_dataset: source of the question\n* sample_question_id: question_id in the original dataset, could be None\n\nThe last three attributes are only for tracking and evaluation by group." ]
80b8d1042ad367f757aef3e88ca816e7225787cd
# Dataset Card for "bbc_news_ptbr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celsowm/bbc_news_ptbr
[ "region:us" ]
2023-11-04T20:34:33+00:00
{"dataset_info": {"features": [{"name": "categoria", "dtype": "string"}, {"name": "titulo", "dtype": "string"}, {"name": "texto", "dtype": "string"}, {"name": "data", "dtype": "string"}, {"name": "link", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 69873414, "num_examples": 8637}], "download_size": 40455060, "dataset_size": 69873414}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-04T20:34:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bbc_news_ptbr" More Information needed
[ "# Dataset Card for \"bbc_news_ptbr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bbc_news_ptbr\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bbc_news_ptbr\"\n\nMore Information needed" ]
39698b7cc79df5f400227b8ff77b797cd4c4417d
# Dataset Card for "kikongo-french-translation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Svngoku/kikongo-french-translation
[ "language:kg", "language:fr", "region:us" ]
2023-11-05T00:53:54+00:00
{"language": ["kg", "fr"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "translation", "dtype": {"translation": {"languages": ["kg", "fr"]}}}], "splits": [{"name": "train", "num_bytes": 21895.010869565216, "num_examples": 588}, {"name": "test", "num_bytes": 5510.989130434783, "num_examples": 148}], "download_size": 25010, "dataset_size": 27406}}
2024-02-06T23:41:06+00:00
[]
[ "kg", "fr" ]
TAGS #language-Kongo #language-French #region-us
# Dataset Card for "kikongo-french-translation" More Information needed
[ "# Dataset Card for \"kikongo-french-translation\"\n\nMore Information needed" ]
[ "TAGS\n#language-Kongo #language-French #region-us \n", "# Dataset Card for \"kikongo-french-translation\"\n\nMore Information needed" ]
[ 17, 20 ]
[ "passage: TAGS\n#language-Kongo #language-French #region-us \n# Dataset Card for \"kikongo-french-translation\"\n\nMore Information needed" ]
95ec9565d21268d4a16ee73cbea2dd82d3437920
# Dataset Card for "open_web_random_5000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
btt-mining-coalation/open_web_random_5000
[ "region:us" ]
2023-11-05T01:42:24+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "reward_dpo", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 30649367, "num_examples": 5000}], "download_size": 18002442, "dataset_size": 30649367}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-09T14:45:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "open_web_random_5000" More Information needed
[ "# Dataset Card for \"open_web_random_5000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"open_web_random_5000\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"open_web_random_5000\"\n\nMore Information needed" ]
d24ea52061aaf9f3dc1feafd916708e04e21680a
# Dataset Card for "red_pajama_random_5000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
btt-mining-coalation/red_pajama_random_5000
[ "region:us" ]
2023-11-05T01:42:27+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "reward_dpo", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 304835574, "num_examples": 5000}], "download_size": 137390511, "dataset_size": 304835574}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-09T14:45:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "red_pajama_random_5000" More Information needed
[ "# Dataset Card for \"red_pajama_random_5000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"red_pajama_random_5000\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"red_pajama_random_5000\"\n\nMore Information needed" ]
ffb36b3bf9de3fda4f9ab393fad92f35717d4461
# Dataset Card for "ola_polyglot_12.8B_t1_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eunbinni/ola_polyglot_12.8B_t1_data
[ "region:us" ]
2023-11-05T01:43:50+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 691281335, "num_examples": 580812}], "download_size": 399933748, "dataset_size": 691281335}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T01:44:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ola_polyglot_12.8B_t1_data" More Information needed
[ "# Dataset Card for \"ola_polyglot_12.8B_t1_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ola_polyglot_12.8B_t1_data\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ola_polyglot_12.8B_t1_data\"\n\nMore Information needed" ]
52920dbc1b5d1eb89c0b42339557ef3d0f60c2a0
# Dataset Card for "ola_llama2_7B_t2_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eunbinni/ola_llama2_7B_t2_data
[ "region:us" ]
2023-11-05T02:02:55+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 691281335, "num_examples": 580812}], "download_size": 399933748, "dataset_size": 691281335}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T02:03:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ola_llama2_7B_t2_data" More Information needed
[ "# Dataset Card for \"ola_llama2_7B_t2_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ola_llama2_7B_t2_data\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ola_llama2_7B_t2_data\"\n\nMore Information needed" ]
63c50191e801c4d73af36119e1c15d14eafe617e
# Dataset Card for "mlcb-lite" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nathanReitinger/mlcb-lite
[ "region:us" ]
2023-11-05T02:21:49+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 530086607, "num_examples": 4681}, {"name": "test", "num_bytes": 60210047, "num_examples": 521}], "download_size": 224023623, "dataset_size": 590296654}}
2023-11-05T02:38:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mlcb-lite" More Information needed
[ "# Dataset Card for \"mlcb-lite\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mlcb-lite\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mlcb-lite\"\n\nMore Information needed" ]
6524f98c3bf46b2975303bd70e96fb1a7de9b18b
Trying to add dataset files
nautilus06/QVHighlights_preprocessed
[ "size_categories:1K<n<10K", "language:en", "license:mit", "video", "highlight detection", "moment retrieval", "region:us" ]
2023-11-05T02:26:19+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "pretty_name": "QVHighlights preprocessed", "tags": ["video", "highlight detection", "moment retrieval"]}
2023-12-21T07:21:10+00:00
[]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #license-mit #video #highlight detection #moment retrieval #region-us
Trying to add dataset files
[]
[ "TAGS\n#size_categories-1K<n<10K #language-English #license-mit #video #highlight detection #moment retrieval #region-us \n" ]
[ 39 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #license-mit #video #highlight detection #moment retrieval #region-us \n" ]
908ce70796b812d08859682c0339b38d35c3dee1
Used to detect severely stylistically corrupted anime images generated by Stable Diffusion. There are two classes: `corrupted` and `normal`, comprising 61,100 and 63,004 images, respectively. For the `corrupted` type, it includes several cases such as: * Entirely black images * Mosaics or stylistic anomalies due to low sampling steps * Stylistic anomalies due to excessively weighted tags * Stylistic anomalies due to embedding with excessive weights It's important to note that the following content is not included: * Distortions in facial and hand details caused by resolution issues * Distortions in human and object structures For the `normal` type, it contains regular AI-generated images as well as approximately 15,000 images hand-drawn by humans.
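A minimal sketch for inspecting the class balance; whether this repository loads directly through `load_dataset`, and the exact split and column names, are assumptions rather than something the card states:

```python
from collections import Counter

from datasets import load_dataset

# Assumptions: the repo exposes a "train" split with a class-encoded
# "label" column, as is typical for image-classification datasets.
ds = load_dataset("deepghs/ai_image_corrupted", split="train")
counts = Counter(ds["label"])
print(counts)  # expected roughly 61,100 corrupted vs. 63,004 normal images
```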
deepghs/ai_image_corrupted
[ "task_categories:image-classification", "size_categories:100K<n<1M", "license:openrail", "art", "region:us" ]
2023-11-05T02:33:53+00:00
{"license": "openrail", "size_categories": ["100K<n<1M"], "task_categories": ["image-classification"], "tags": ["art"]}
2023-11-08T04:50:39+00:00
[]
[]
TAGS #task_categories-image-classification #size_categories-100K<n<1M #license-openrail #art #region-us
Used to detect severely stylistically corrupted anime images generated by Stable Diffusion. There are two classes: 'corrupted' and 'normal', comprising 61,100 and 63,004 images, respectively. For the 'corrupted' type, it includes several cases such as: * Entirely black images * Mosaics or stylistic anomalies due to low sampling steps * Stylistic anomalies due to excessively weighted tags * Stylistic anomalies due to embedding with excessive weights It's important to note that the following content is not included: * Distortions in facial and hand details caused by resolution issues * Distortions in human and object structures For the 'normal' type, it contains regular AI-generated images as well as approximately 15,000 images hand-drawn by humans.
[]
[ "TAGS\n#task_categories-image-classification #size_categories-100K<n<1M #license-openrail #art #region-us \n" ]
[ 37 ]
[ "passage: TAGS\n#task_categories-image-classification #size_categories-100K<n<1M #license-openrail #art #region-us \n" ]
19d95986f2c93ce36118a958676c04e259ff3d70
# Dataset Card for "rlhf_cleaned_prompt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ThWu/rlhf_cleaned_prompt
[ "region:us" ]
2023-11-05T02:39:46+00:00
{"dataset_info": {"features": [{"name": "conversations", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 105144157, "num_examples": 182874}], "download_size": 64894627, "dataset_size": 105144157}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T02:40:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rlhf_cleaned_prompt" More Information needed
[ "# Dataset Card for \"rlhf_cleaned_prompt\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rlhf_cleaned_prompt\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rlhf_cleaned_prompt\"\n\nMore Information needed" ]
877f603922d1c95c6488f5431826ed66bdc6c571
# Dataset Card for "exampledataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PepBun/exampledataset
[ "region:us" ]
2023-11-05T02:56:40+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4954127.0, "num_examples": 5}], "download_size": 4936855, "dataset_size": 4954127.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T02:56:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "exampledataset" More Information needed
[ "# Dataset Card for \"exampledataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"exampledataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"exampledataset\"\n\nMore Information needed" ]
90e79d8d9cfcdb8a797cae2824be397944672f0d
# Dataset Card for "exampledataset2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PepBun/exampledataset2
[ "region:us" ]
2023-11-05T02:57:42+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28604165.0, "num_examples": 24}], "download_size": 28492538, "dataset_size": 28604165.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T03:13:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "exampledataset2" More Information needed
[ "# Dataset Card for \"exampledataset2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"exampledataset2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"exampledataset2\"\n\nMore Information needed" ]
32408f41d024ef3b2c014fea3ff90556bc4c54bf
# Dataset Card for "exampledataset3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PepBun/exampledataset3
[ "region:us" ]
2023-11-05T03:15:14+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28604165.0, "num_examples": 24}], "download_size": 28492538, "dataset_size": 28604165.0}}
2023-11-05T03:15:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "exampledataset3" More Information needed
[ "# Dataset Card for \"exampledataset3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"exampledataset3\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"exampledataset3\"\n\nMore Information needed" ]
ed2a16b6587dc6145fd25f93f2722cef960151b5
# Dataset Card for "s2t-augmented-data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
trongnt/s2t-augmented-data
[ "region:us" ]
2023-11-05T03:22:28+00:00
{"dataset_info": {"features": [{"name": "speech", "sequence": "float64"}, {"name": "sampling_rate", "dtype": "int64"}, {"name": "target_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4179831876.0281487, "num_examples": 8089}, {"name": "test", "num_bytes": 464540592.97185135, "num_examples": 899}], "download_size": 2076823008, "dataset_size": 4644372469.0}}
2023-11-05T03:24:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "s2t-augmented-data" More Information needed
[ "# Dataset Card for \"s2t-augmented-data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"s2t-augmented-data\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"s2t-augmented-data\"\n\nMore Information needed" ]
e257aa9cd67ee6bfc5fc657b1c2a8f7a9af72529
# Dataset Card for "building_detection" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
spr1916/building_detection
[ "region:us" ]
2023-11-05T03:23:33+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "image", "dtype": "string"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "objects", "struct": [{"name": "area", "sequence": "int64"}, {"name": "bbox", "sequence": {"sequence": "float64"}}, {"name": "category", "sequence": "int64"}, {"name": "id", "sequence": "int64"}]}], "splits": [{"name": "train", "num_bytes": 1427880, "num_examples": 5000}], "download_size": 547367, "dataset_size": 1427880}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T03:23:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "building_detection" More Information needed
[ "# Dataset Card for \"building_detection\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"building_detection\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"building_detection\"\n\nMore Information needed" ]
9f1349f347ef64f342fa9e77d475ea5ad884f7ca
# Dataset Card for "instruct_v3_5k_and_lima" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aditijha/instruct_v3_5k_and_lima
[ "region:us" ]
2023-11-05T03:28:29+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22803070, "num_examples": 6000}], "download_size": 13069762, "dataset_size": 22803070}}
2023-11-05T03:28:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "instruct_v3_5k_and_lima" More Information needed
[ "# Dataset Card for \"instruct_v3_5k_and_lima\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"instruct_v3_5k_and_lima\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"instruct_v3_5k_and_lima\"\n\nMore Information needed" ]
d80ab67540571d7506f5d55d846b696984856eb3
# Dataset Card for "instruct_v1_1k_and_lima" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aditijha/instruct_v1_1k_and_lima
[ "region:us" ]
2023-11-05T03:29:40+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3698244, "num_examples": 2000}], "download_size": 2042056, "dataset_size": 3698244}}
2023-11-05T03:29:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "instruct_v1_1k_and_lima" More Information needed
[ "# Dataset Card for \"instruct_v1_1k_and_lima\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"instruct_v1_1k_and_lima\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"instruct_v1_1k_and_lima\"\n\nMore Information needed" ]
1fa63b999e84524f76d27ac9b07696099f87e51c
# Dataset Card for "rm-static" Split of [hh-static](https://huggingface.co/datasets/Dahoas/static-hh) used for training reward models after supervised fine-tuning.
cryptom/rm-static
[ "region:us" ]
2023-11-05T03:41:58+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 113850006, "num_examples": 76256}, {"name": "test", "num_bytes": 7649255, "num_examples": 5103}], "download_size": 73006535, "dataset_size": 121499261}}
2023-11-05T04:01:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rm-static" Split of hh-static used for training reward models after supervised fine-tuning.
[ "# Dataset Card for \"rm-static\"\n\nSplit of hh-static used for training reward models after supervised fine-tuning." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rm-static\"\n\nSplit of hh-static used for training reward models after supervised fine-tuning." ]
[ 6, 31 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rm-static\"\n\nSplit of hh-static used for training reward models after supervised fine-tuning." ]
45a7d038c33a49e0a60f0f8832f46109e468eeab
# Description The file `vien-corpus.txt` has 7957186 lines, mixing Vietnamese and English texts. This file was used as a corpus to train a tokenizer for both Vietnamese and English. # Details ## How was this corpus created? 1. English text is from Wikipedia 2. Vietnamese text is from 2 sources: * Crawled data from news websites * Oscar dataset
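For illustration, a minimal sketch of training a tokenizer on this corpus with the Hugging Face `tokenizers` library; the BPE algorithm and the 32k vocabulary size are assumptions, since the card does not say which kind of tokenizer was trained:

```python
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Assumptions: byte-pair encoding and a 32k vocabulary; the card only
# states that a tokenizer was trained on vien-corpus.txt.
tokenizer = Tokenizer(models.BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.BpeTrainer(vocab_size=32000, special_tokens=["[UNK]", "[PAD]"])
tokenizer.train(files=["vien-corpus.txt"], trainer=trainer)
tokenizer.save("vien-tokenizer.json")
```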
levuloihust/vien-corpus-for-tokenizer
[ "language:vi", "language:en", "region:us" ]
2023-11-05T04:19:22+00:00
{"language": ["vi", "en"]}
2023-11-05T05:19:10+00:00
[]
[ "vi", "en" ]
TAGS #language-Vietnamese #language-English #region-us
# Description The file 'URL' has 7957186 lines, mixing Vietnamese and English texts. This file was used as a corpus to train a tokenizer for both Vietnamese and English. # Details ## How was this corpus created? 1. English text is from Wikipedia 2. Vietnamese text is from 2 sources: * Crawled data from news websites * Oscar dataset
[ "# Description\nThe file 'URL' has 7957186 lines, mixing Vietnamese and English texts. This file was used as a corpus to train a tokenizer for both Vietnamese and English.", "# Details", "## How was this corpus created?\n1. English text is from Wikipedia\n2. Vietnamese text is from 2 sources:\n* Crawled data from news websites\n* Oscar dataset" ]
[ "TAGS\n#language-Vietnamese #language-English #region-us \n", "# Description\nThe file 'URL' has 7957186 lines, mixing Vietnamese and English texts. This file was used as a corpus to train a tokenizer for both Vietnamese and English.", "# Details", "## How was this corpus created?\n1. English text is from Wikipedia\n2. Vietnamese text is from 2 sources:\n* Crawled data from news websites\n* Oscar dataset" ]
[ 17, 42, 2, 34 ]
[ "passage: TAGS\n#language-Vietnamese #language-English #region-us \n# Description\nThe file 'URL' has 7957186 lines, mixing Vietnamese and English texts. This file was used as a corpus to train a tokenizer for both Vietnamese and English.# Details## How was this corpus created?\n1. English text is from Wikipedia\n2. Vietnamese text is from 2 sources:\n* Crawled data from news websites\n* Oscar dataset" ]
77557aebc28cef8efce70c1dc231b72c7a9a34cf
# Dataset Card for "PetClassification" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Diegulio/PetClassification
[ "region:us" ]
2023-11-05T04:25:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "No detectado", "1": "affenpinscher", "2": "afghan_hound", "3": "african_hunting_dog", "4": "airedale", "5": "american_staffordshire_terrier", "6": "appenzeller", "7": "australian_terrier", "8": "basenji", "9": "basset", "10": "beagle", "11": "bedlington_terrier", "12": "bernese_mountain_dog", "13": "black-and-tan_coonhound", "14": "blenheim_spaniel", "15": "bloodhound", "16": "bluetick", "17": "border_collie", "18": "border_terrier", "19": "borzoi", "20": "boston_bull", "21": "bouvier_des_flandres", "22": "boxer", "23": "brabancon_griffon", "24": "briard", "25": "brittany_spaniel", "26": "bull_mastiff", "27": "cairn", "28": "cardigan", "29": "chesapeake_bay_retriever", "30": "chihuahua", "31": "chow", "32": "clumber", "33": "cocker_spaniel", "34": "collie", "35": "curly-coated_retriever", "36": "dandie_dinmont", "37": "dhole", "38": "dingo", "39": "doberman", "40": "english_foxhound", "41": "english_setter", "42": "english_springer", "43": "entlebucher", "44": "eskimo_dog", "45": "flat-coated_retriever", "46": "french_bulldog", "47": "gato", "48": "german_shepherd", "49": "german_short-haired_pointer", "50": "giant_schnauzer", "51": "golden_retriever", "52": "gordon_setter", "53": "great_dane", "54": "great_pyrenees", "55": "greater_swiss_mountain_dog", "56": "groenendael", "57": "ibizan_hound", "58": "irish_setter", "59": "irish_terrier", "60": "irish_water_spaniel", "61": "irish_wolfhound", "62": "italian_greyhound", "63": "japanese_spaniel", "64": "keeshond", "65": "kelpie", "66": "kerry_blue_terrier", "67": "komondor", "68": "kuvasz", "69": "labrador_retriever", "70": "lakeland_terrier", "71": "leonberg", "72": "lhasa", "73": "malamute", "74": "malinois", "75": "maltese_dog", "76": "mexican_hairless", "77": "miniature_pinscher", "78": "miniature_poodle", "79": "miniature_schnauzer", "80": "newfoundland", "81": "norfolk_terrier", "82": "norwegian_elkhound", "83": "norwich_terrier", "84": "old_english_sheepdog", "85": "otterhound", "86": "papillon", "87": "pekinese", "88": "pembroke", "89": "pomeranian", "90": "pug", "91": "redbone", "92": "rhodesian_ridgeback", "93": "rottweiler", "94": "saint_bernard", "95": "saluki", "96": "samoyed", "97": "schipperke", "98": "scotch_terrier", "99": "scottish_deerhound", "100": "sealyham_terrier", "101": "shetland_sheepdog", "102": "shih-tzu", "103": "siberian_husky", "104": "silky_terrier", "105": "soft-coated_wheaten_terrier", "106": "staffordshire_bullterrier", "107": "standard_poodle", "108": "standard_schnauzer", "109": "sussex_spaniel", "110": "tibetan_mastiff", "111": "tibetan_terrier", "112": "toy_poodle", "113": "toy_terrier", "114": "vizsla", "115": "walker_hound", "116": "weimaraner", "117": "welsh_springer_spaniel", "118": "west_highland_white_terrier", "119": "whippet", "120": "wire-haired_fox_terrier", "121": "yorkshire_terrier"}}}}], "splits": [{"name": "train", "num_bytes": 344179685.94, "num_examples": 7499}, {"name": "validation", "num_bytes": 29205702.0, "num_examples": 834}, {"name": "test", "num_bytes": 81732756.983, "num_examples": 2083}], "download_size": 379294077, "dataset_size": 455118144.923}}
2023-11-05T04:26:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "PetClassification" More Information needed
[ "# Dataset Card for \"PetClassification\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"PetClassification\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"PetClassification\"\n\nMore Information needed" ]
fc5e2902c6ec9d88ea16e69f588e40a4f1d0ebb6
# Dataset Card for "arxiv-cs.LG-23" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
theblackcat102/arxiv-cs.LG-23
[ "region:us" ]
2023-11-05T04:47:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "updated", "dtype": "timestamp[s]"}, {"name": "published", "dtype": "timestamp[s]"}, {"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "author", "sequence": "string"}, {"name": "arxiv:doi", "dtype": "string"}, {"name": "link", "list": [{"name": "@title", "dtype": "string"}, {"name": "@href", "dtype": "string"}, {"name": "@rel", "dtype": "string"}, {"name": "@type", "dtype": "string"}]}, {"name": "arxiv:journal_ref", "sequence": "string"}, {"name": "arxiv:primary_category", "struct": [{"name": "@xmlns:arxiv", "dtype": "string"}, {"name": "@term", "dtype": "string"}, {"name": "@scheme", "dtype": "string"}]}, {"name": "category", "sequence": "string"}, {"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1399278156, "num_examples": 21394}], "download_size": 744792190, "dataset_size": 1399278156}}
2023-11-05T04:50:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arxiv-cs.LG-23" More Information needed
[ "# Dataset Card for \"arxiv-cs.LG-23\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arxiv-cs.LG-23\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arxiv-cs.LG-23\"\n\nMore Information needed" ]
ba1c1f6385fc65fe4de6ff8ea62d707e9e28db49
# Dataset Card for "wikilingua_data-temario_results" rouge={'rouge1': 0.17417346657091554, 'rouge2': 0.05244434884193, 'rougeL': 0.11143891313862225, 'rougeLsum': 0.11143891313862225} Bert={'precision': 0.6341577677623086, 'recall': 0.7350342140413835, 'f1': 0.6800217146312832} moverscore 0.5511240248681097
arthurmluz/wikilingua_data-temario_results
[ "region:us" ]
2023-11-05T05:15:48+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 31900191, "num_examples": 8165}], "download_size": 19378476, "dataset_size": 31900191}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-13T19:24:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikilingua_data-temario_results" rouge={'rouge1': 0.17417346657091554, 'rouge2': 0.05244434884193, 'rougeL': 0.11143891313862225, 'rougeLsum': 0.11143891313862225} Bert={'precision': 0.6341577677623086, 'recall': 0.7350342140413835, 'f1': 0.6800217146312832} moverscore 0.5511240248681097
[ "# Dataset Card for \"wikilingua_data-temario_results\"\n\nrouge={'rouge1': 0.17417346657091554, 'rouge2': 0.05244434884193, 'rougeL': 0.11143891313862225, 'rougeLsum': 0.11143891313862225}\n\nBert={'precision': 0.6341577677623086, 'recall': 0.7350342140413835, 'f1': 0.6800217146312832}\n\nmoverscore 0.5511240248681097" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikilingua_data-temario_results\"\n\nrouge={'rouge1': 0.17417346657091554, 'rouge2': 0.05244434884193, 'rougeL': 0.11143891313862225, 'rougeLsum': 0.11143891313862225}\n\nBert={'precision': 0.6341577677623086, 'recall': 0.7350342140413835, 'f1': 0.6800217146312832}\n\nmoverscore 0.5511240248681097" ]
[ 6, 136 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikilingua_data-temario_results\"\n\nrouge={'rouge1': 0.17417346657091554, 'rouge2': 0.05244434884193, 'rougeL': 0.11143891313862225, 'rougeLsum': 0.11143891313862225}\n\nBert={'precision': 0.6341577677623086, 'recall': 0.7350342140413835, 'f1': 0.6800217146312832}\n\nmoverscore 0.5511240248681097" ]
ec45c1a75c024818e55c34409e10b423c246404d
# Dataset Card for Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/TehVenom/Moderator-Chan_GPT-JT-6b
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [TehVenom/Moderator-Chan_GPT-JT-6b](https://huggingface.co/TehVenom/Moderator-Chan_GPT-JT-6b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_TehVenom__Moderator-Chan_GPT-JT-6b_public",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-11-06T16:05:16.771792](https://huggingface.co/datasets/open-llm-leaderboard/details_TehVenom__Moderator-Chan_GPT-JT-6b_public/blob/main/results_2023-11-06T16-05-16.771792.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):

```json
{
    "all": {
        "em": 0.0008389261744966443,
        "em_stderr": 0.0002964962989801249,
        "f1": 0.0455861996644295,
        "f1_stderr": 0.001167270115698605,
        "acc": 0.33438429175196105,
        "acc_stderr": 0.008229511585752802
    },
    "harness|drop|3": {
        "em": 0.0008389261744966443,
        "em_stderr": 0.0002964962989801249,
        "f1": 0.0455861996644295,
        "f1_stderr": 0.001167270115698605
    },
    "harness|gsm8k|5": {
        "acc": 0.01288855193328279,
        "acc_stderr": 0.0031069012664996704
    },
    "harness|winogrande|5": {
        "acc": 0.6558800315706393,
        "acc_stderr": 0.013352121905005935
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
open-llm-leaderboard/details_TehVenom__Moderator-Chan_GPT-JT-6b
[ "region:us" ]
2023-11-05T05:29:54+00:00
{"pretty_name": "Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b", "dataset_summary": "Dataset automatically created during the evaluation run of model [TehVenom/Moderator-Chan_GPT-JT-6b](https://huggingface.co/TehVenom/Moderator-Chan_GPT-JT-6b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_TehVenom__Moderator-Chan_GPT-JT-6b_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-06T16:05:16.771792](https://huggingface.co/datasets/open-llm-leaderboard/details_TehVenom__Moderator-Chan_GPT-JT-6b_public/blob/main/results_2023-11-06T16-05-16.771792.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.0008389261744966443,\n \"em_stderr\": 0.0002964962989801249,\n \"f1\": 0.0455861996644295,\n \"f1_stderr\": 0.001167270115698605,\n \"acc\": 0.33438429175196105,\n \"acc_stderr\": 0.008229511585752802\n },\n \"harness|drop|3\": {\n \"em\": 0.0008389261744966443,\n \"em_stderr\": 0.0002964962989801249,\n \"f1\": 0.0455861996644295,\n \"f1_stderr\": 0.001167270115698605\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.01288855193328279,\n \"acc_stderr\": 0.0031069012664996704\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6558800315706393,\n \"acc_stderr\": 0.013352121905005935\n }\n}\n```", "repo_url": "https://huggingface.co/TehVenom/Moderator-Chan_GPT-JT-6b", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_05T05_29_39.737368", "path": ["**/details_harness|drop|3_2023-11-05T05-29-39.737368.parquet"]}, {"split": "2023_11_06T16_05_16.771792", "path": ["**/details_harness|drop|3_2023-11-06T16-05-16.771792.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-06T16-05-16.771792.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_05T05_29_39.737368", "path": ["**/details_harness|gsm8k|5_2023-11-05T05-29-39.737368.parquet"]}, {"split": "2023_11_06T16_05_16.771792", "path": ["**/details_harness|gsm8k|5_2023-11-06T16-05-16.771792.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-06T16-05-16.771792.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_05T05_29_39.737368", "path": ["**/details_harness|winogrande|5_2023-11-05T05-29-39.737368.parquet"]}, {"split": "2023_11_06T16_05_16.771792", "path": ["**/details_harness|winogrande|5_2023-11-06T16-05-16.771792.parquet"]}, {"split": "latest", "path": 
["**/details_harness|winogrande|5_2023-11-06T16-05-16.771792.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_05T05_29_39.737368", "path": ["results_2023-11-05T05-29-39.737368.parquet"]}, {"split": "2023_11_06T16_05_16.771792", "path": ["results_2023-11-06T16-05-16.771792.parquet"]}, {"split": "latest", "path": ["results_2023-11-06T16-05-16.771792.parquet"]}]}]}
2023-11-06T16:05:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b

## Dataset Description

- Homepage: 
- Repository: URL
- Paper: 
- Leaderboard: URL
- Point of Contact: clementine@URL

### Dataset Summary

Dataset automatically created during the evaluation run of model TehVenom/Moderator-Chan_GPT-JT-6b on the Open LLM Leaderboard.

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).

To load the details from a run, you can for instance do the following:

## Latest results

These are the latest results from run 2023-11-06T16:05:16.771792 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):

### Supported Tasks and Leaderboards

### Languages

## Dataset Structure

### Data Instances

### Data Fields

### Data Splits

## Dataset Creation

### Curation Rationale

### Source Data

#### Initial Data Collection and Normalization

#### Who are the source language producers?

### Annotations

#### Annotation process

#### Who are the annotators?

### Personal and Sensitive Information

## Considerations for Using the Data

### Social Impact of Dataset

### Discussion of Biases

### Other Known Limitations

## Additional Information

### Dataset Curators

### Licensing Information

### Contributions
[ "# Dataset Card for Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/Moderator-Chan_GPT-JT-6b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-06T16:05:16.771792(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/Moderator-Chan_GPT-JT-6b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-06T16:05:16.771792(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 25, 31, 174, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of TehVenom/Moderator-Chan_GPT-JT-6b## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model TehVenom/Moderator-Chan_GPT-JT-6b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-06T16:05:16.771792(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
7f1b00710fc473cdee0f9b0f98cc3e4c915785fd
# AI/Tech Dataset This dataset is a collection of AI/tech articles scraped from the web. It's hosted on [HuggingFace Datasets](https://huggingface.co/datasets/siavava/ai-tech-articles), so it is easy to load and work with. ## To load the dataset ### 1. Install [HuggingFace Datasets](https://huggingface.co/docs/datasets/installation.html) ```bash pip install datasets ``` ### 2. Load the dataset ```python from datasets import load_dataset dataset = load_dataset("siavava/ai-tech-articles") # optionally, convert it to a pandas dataframe: df = dataset["train"].to_pandas() ``` You do not need to clone this repo. HuggingFace will download the dataset for you the first time you load it, and cache it locally so it does not need to be downloaded again (unless it detects a change upstream). ## File Structure - [`analytics.ipynb`](analytics.ipynb) - Notebook containing some details about the dataset. - [`example.ipynb`](example.ipynb) - A minimal notebook that loads in the dataset and converts to Pandas. - [`raw.csv`](raw.csv) - The raw data, in CSV format. - `data/*.parquet` - compressed [parquet](https://www.databricks.com/glossary/what-is-parquet) files containing the data. - For raw text files, see the [scraper repo](https://github.com/siavava/scrape.hs) on GitHub.
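As a small illustration of what the pandas conversion enables, here is a sketch that assumes only the columns declared in this record's dataset info (`id`, `year`, `title`, `url`, `text`): it charts the temporal spread of the corpus and a rough length profile.

```python
from datasets import load_dataset

dataset = load_dataset("siavava/ai-tech-articles")
df = dataset["train"].to_pandas()

# Articles per year; `year` is declared as an int64 column in the dataset info.
articles_per_year = df.groupby("year")["id"].count().sort_index()
print(articles_per_year.tail())

# Rough article length (in characters) by year, using the `text` column.
df["n_chars"] = df["text"].str.len()
print(df.groupby("year")["n_chars"].mean().round().tail())
```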
siavava/ai-tech-articles
[ "task_categories:text-generation", "task_categories:feature-extraction", "language:en", "license:mit", "temporal series data", "language data", "doi:10.57967/hf/1324", "region:us" ]
2023-11-05T05:56:04+00:00
{"language": ["en"], "license": "mit", "task_categories": ["text-generation", "feature-extraction"], "pretty_name": "AI/Technology Articles", "tags": ["temporal series data", "language data"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "year", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 180820047, "num_examples": 17092}], "download_size": 81702923, "dataset_size": 180820047}}
2023-11-19T14:06:56+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #task_categories-feature-extraction #language-English #license-mit #temporal series data #language data #doi-10.57967/hf/1324 #region-us
# AI/Tech Dataset This dataset is a collection of AI/tech articles scraped from the web. It's hosted on HuggingFace Datasets, so it is easy to load and work with. ## To load the dataset ### 1. Install HuggingFace Datasets ### 2. Load the dataset You do not need to clone this repo. HuggingFace will download the dataset for you the first time you load it, and cache it locally so it does not need to be downloaded again (unless it detects a change upstream). ## File Structure - 'URL' - Notebook containing some details about the dataset. - 'URL' - A minimal notebook that loads in the dataset and converts to Pandas. - 'URL' - The raw data, in CSV format. - 'data/*.parquet' - compressed parquet files containing the data. - For raw text files, see the scraper repo on GitHub.
[ "# AI/Tech Dataset\n\nThis dataset is a collection of AI/tech articles scraped from the web.\n\nIt's hosted on HuggingFace Datasets, so it is easier to load in and work with.", "## To load the dataset", "### 1. Install HuggingFace Datasets", "### 2. Load the dataset\n\n\n\nYou do not need to clone this repo.\nHuggingFace will download the dataset for you, the first time that you load it,\nand cache it locally so it does not need to re-download it again\n(unless it detects a change upstream).", "## File Structure\n\n- 'URL' - Notebook containing some details about the dataset.\n- 'URL' - A minimal notebook that loads in the dataset and converts to Pandas.\n- 'URL' - The raw data, in CSV format.\n- 'data/*.parquet'- compressed parquet containing the data.\n- For raw text files, see the scraper repo on GitHub." ]
[ "TAGS\n#task_categories-text-generation #task_categories-feature-extraction #language-English #license-mit #temporal series data #language data #doi-10.57967/hf/1324 #region-us \n", "# AI/Tech Dataset\n\nThis dataset is a collection of AI/tech articles scraped from the web.\n\nIt's hosted on HuggingFace Datasets, so it is easier to load in and work with.", "## To load the dataset", "### 1. Install HuggingFace Datasets", "### 2. Load the dataset\n\n\n\nYou do not need to clone this repo.\nHuggingFace will download the dataset for you, the first time that you load it,\nand cache it locally so it does not need to re-download it again\n(unless it detects a change upstream).", "## File Structure\n\n- 'URL' - Notebook containing some details about the dataset.\n- 'URL' - A minimal notebook that loads in the dataset and converts to Pandas.\n- 'URL' - The raw data, in CSV format.\n- 'data/*.parquet'- compressed parquet containing the data.\n- For raw text files, see the scraper repo on GitHub." ]
[ 58, 48, 6, 11, 65, 92 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-feature-extraction #language-English #license-mit #temporal series data #language data #doi-10.57967/hf/1324 #region-us \n# AI/Tech Dataset\n\nThis dataset is a collection of AI/tech articles scraped from the web.\n\nIt's hosted on HuggingFace Datasets, so it is easier to load in and work with.## To load the dataset### 1. Install HuggingFace Datasets### 2. Load the dataset\n\n\n\nYou do not need to clone this repo.\nHuggingFace will download the dataset for you, the first time that you load it,\nand cache it locally so it does not need to re-download it again\n(unless it detects a change upstream).## File Structure\n\n- 'URL' - Notebook containing some details about the dataset.\n- 'URL' - A minimal notebook that loads in the dataset and converts to Pandas.\n- 'URL' - The raw data, in CSV format.\n- 'data/*.parquet'- compressed parquet containing the data.\n- For raw text files, see the scraper repo on GitHub." ]
a46050bca835a48cc06439445e3bc35646aec0c3
# Dataset Card for "ola_llama2_7B_t1_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eunbinni/ola_llama2_7B_t1_data
[ "region:us" ]
2023-11-05T06:08:27+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 691281335, "num_examples": 580812}], "download_size": 399933748, "dataset_size": 691281335}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T06:09:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ola_llama2_7B_t1_data" More Information needed
[ "# Dataset Card for \"ola_llama2_7B_t1_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ola_llama2_7B_t1_data\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ola_llama2_7B_t1_data\"\n\nMore Information needed" ]
a0b681f5db8c9063464f59ce8da2b7f3f22fb248
# Dataset Card for Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/TheBloke/wizard-vicuna-13B-GPTQ - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [TheBloke/wizard-vicuna-13B-GPTQ](https://huggingface.co/TheBloke/wizard-vicuna-13B-GPTQ) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ_public", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-11-07T21:40:44.837005](https://huggingface.co/datasets/open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ_public/blob/main/results_2023-11-07T21-40-44.837005.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.04488255033557047, "em_stderr": 0.0021203463374070692, "f1": 0.11209521812080547, "f1_stderr": 0.002495000900110754, "acc": 0.4218599749995961, "acc_stderr": 0.010168206288804995 }, "harness|drop|3": { "em": 0.04488255033557047, "em_stderr": 0.0021203463374070692, "f1": 0.11209521812080547, "f1_stderr": 0.002495000900110754 }, "harness|gsm8k|5": { "acc": 0.09628506444275967, "acc_stderr": 0.008125264128215884 }, "harness|winogrande|5": { "acc": 0.7474348855564326, "acc_stderr": 0.012211148449394105 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
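Beyond the per-task details, this record's config mapping also declares a `results` configuration whose `latest` split points at the most recent aggregated parquet. Assuming that mapping loads like the other configs, the aggregated metrics can be pulled the same way; this is a sketch, with the config and split names taken from this record's metadata rather than from separate documentation.

```python
from datasets import load_dataset

# Aggregated metrics of the most recent run, via the "results" config
# and its "latest" split as declared in the record's config mapping.
results = load_dataset(
    "open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ_public",
    "results",
    split="latest",
)
print(results[0])
```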
open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ
[ "region:us" ]
2023-11-05T07:13:14+00:00
{"pretty_name": "Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ", "dataset_summary": "Dataset automatically created during the evaluation run of model [TheBloke/wizard-vicuna-13B-GPTQ](https://huggingface.co/TheBloke/wizard-vicuna-13B-GPTQ) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-07T21:40:44.837005](https://huggingface.co/datasets/open-llm-leaderboard/details_TheBloke__wizard-vicuna-13B-GPTQ_public/blob/main/results_2023-11-07T21-40-44.837005.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.04488255033557047,\n \"em_stderr\": 0.0021203463374070692,\n \"f1\": 0.11209521812080547,\n \"f1_stderr\": 0.002495000900110754,\n \"acc\": 0.4218599749995961,\n \"acc_stderr\": 0.010168206288804995\n },\n \"harness|drop|3\": {\n \"em\": 0.04488255033557047,\n \"em_stderr\": 0.0021203463374070692,\n \"f1\": 0.11209521812080547,\n \"f1_stderr\": 0.002495000900110754\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.09628506444275967,\n \"acc_stderr\": 0.008125264128215884\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7474348855564326,\n \"acc_stderr\": 0.012211148449394105\n }\n}\n```", "repo_url": "https://huggingface.co/TheBloke/wizard-vicuna-13B-GPTQ", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_05T07_12_56.494554", "path": ["**/details_harness|drop|3_2023-11-05T07-12-56.494554.parquet"]}, {"split": "2023_11_07T21_40_44.837005", "path": ["**/details_harness|drop|3_2023-11-07T21-40-44.837005.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-07T21-40-44.837005.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_05T07_12_56.494554", "path": ["**/details_harness|gsm8k|5_2023-11-05T07-12-56.494554.parquet"]}, {"split": "2023_11_07T21_40_44.837005", "path": ["**/details_harness|gsm8k|5_2023-11-07T21-40-44.837005.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-07T21-40-44.837005.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_05T07_12_56.494554", "path": ["**/details_harness|winogrande|5_2023-11-05T07-12-56.494554.parquet"]}, {"split": "2023_11_07T21_40_44.837005", "path": ["**/details_harness|winogrande|5_2023-11-07T21-40-44.837005.parquet"]}, {"split": "latest", "path": 
["**/details_harness|winogrande|5_2023-11-07T21-40-44.837005.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_05T07_12_56.494554", "path": ["results_2023-11-05T07-12-56.494554.parquet"]}, {"split": "2023_11_07T21_40_44.837005", "path": ["results_2023-11-07T21-40-44.837005.parquet"]}, {"split": "latest", "path": ["results_2023-11-07T21-40-44.837005.parquet"]}]}]}
2023-11-07T21:41:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model TheBloke/wizard-vicuna-13B-GPTQ on the Open LLM Leaderboard. The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-11-07T21:40:44.837005 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/wizard-vicuna-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-07T21:40:44.837005(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/wizard-vicuna-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-07T21:40:44.837005(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 24, 31, 173, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of TheBloke/wizard-vicuna-13B-GPTQ## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/wizard-vicuna-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-07T21:40:44.837005(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
d92510598ab510acf4a12b24b574d178c42e5da0
# Dataset Card for "ola_llama2_13B_t1_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eunbinni/ola_llama2_13B_t1_data
[ "region:us" ]
2023-11-05T07:14:15+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 691281335, "num_examples": 580812}], "download_size": 399933748, "dataset_size": 691281335}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T07:14:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ola_llama2_13B_t1_data" More Information needed
[ "# Dataset Card for \"ola_llama2_13B_t1_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ola_llama2_13B_t1_data\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ola_llama2_13B_t1_data\"\n\nMore Information needed" ]
207b97804d61934d402d82c348e8a8f9672a7611
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
zjhqss/test
[ "task_categories:table-question-answering", "size_categories:n<1K", "license:mit", "region:us" ]
2023-11-05T07:55:11+00:00
{"license": "mit", "size_categories": ["n<1K"], "task_categories": ["table-question-answering"]}
2023-11-05T08:05:14+00:00
[]
[]
TAGS #task_categories-table-question-answering #size_categories-n<1K #license-mit #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#task_categories-table-question-answering #size_categories-n<1K #license-mit #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 35, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#task_categories-table-question-answering #size_categories-n<1K #license-mit #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
513400919a92d0cb5fe476cb5cc83691ddbd4dad
# Dataset Card for "usda_tokenized_target" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
passionMan/usda_tokenized_target
[ "region:us" ]
2023-11-05T08:05:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 541970, "num_examples": 2527}, {"name": "test", "num_bytes": 180736, "num_examples": 843}], "download_size": 136249, "dataset_size": 722706}}
2023-11-05T08:05:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "usda_tokenized_target" More Information needed
[ "# Dataset Card for \"usda_tokenized_target\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"usda_tokenized_target\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"usda_tokenized_target\"\n\nMore Information needed" ]
da529e3f0396e0461daff2b92b56b7dd5f9134ba
# Dataset Card for "uspto-full" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Phando/uspto-full
[ "region:us" ]
2023-11-05T09:09:55+00:00
{"dataset_info": {"features": [{"name": "PatentNumber", "dtype": "string"}, {"name": "Year", "dtype": "int64"}, {"name": "reactions", "dtype": "string"}, {"name": "canonical_reactions", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 519191703, "num_examples": 1808937}], "download_size": 144493447, "dataset_size": 519191703}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-05T10:40:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "uspto-full" More Information needed
[ "# Dataset Card for \"uspto-full\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"uspto-full\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"uspto-full\"\n\nMore Information needed" ]
0415b5ccd5fcbb6f98afb5f77fd9fc03246a3e74
# Dataset Card for "eurlexsum_ita_cleaned_16384_184" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gianma/eurlexsum_ita_cleaned_16384_184
[ "region:us" ]
2023-11-05T09:17:23+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "is_camera", "dtype": "bool"}, {"name": "reference", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tokenized_len_total", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8477005, "num_examples": 463}, {"name": "validation", "num_bytes": 467050, "num_examples": 27}, {"name": "test", "num_bytes": 507371, "num_examples": 27}], "download_size": 3990397, "dataset_size": 9451426}}
2023-11-05T11:55:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eurlexsum_ita_cleaned_16384_184" More Information needed
[ "# Dataset Card for \"eurlexsum_ita_cleaned_16384_184\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eurlexsum_ita_cleaned_16384_184\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eurlexsum_ita_cleaned_16384_184\"\n\nMore Information needed" ]