sha
stringlengths
40
40
text
stringlengths
0
13.4M
id
stringlengths
2
117
tags
list
created_at
stringlengths
25
25
metadata
stringlengths
2
31.7M
last_modified
stringlengths
25
25
79de22f57931bf49c1c0b5890d0f713f513de5b8
# Hurybone Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/hurybone_style/resolve/main/hurybone_showcase.png"/> ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"hurybone_style"``` Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(hurybone_style:0.8)"``` I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/hurybone_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-27T22:53:49+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/hurybone_style/resolve/main/hurybone_showcase.png", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-27T22:59:20+00:00
404a26a5d24473d6c5fad7ec3da6cdea22eda285
# Iskou Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/iskou_style/resolve/main/iskou_showcase.png"/> ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"iskou_style"``` Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(iskou_style:0.8)"``` I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/iskou_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-27T22:53:57+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/iskou_style/resolve/main/iskou_showcase.png", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-27T23:00:25+00:00
073c1e5ecb6d0df09108909d20deba6fe5e8adf4
# Saska Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/saska_style/resolve/main/saska_showcase.png"/> ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"saska_style"``` Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(saska_style:0.8)"``` I trained the embedding two epochs until 8000 steps. I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/saska_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-27T22:54:04+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/saska_style/resolve/main/saska_showcase.png", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-27T22:58:22+00:00
d431d825eab9fd83356bc9aa98db08c58e902006
# Star Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/star_style/resolve/main/star_showcase.png"/> ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"star_style"``` Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(star_style:0.8)"``` This embedding can be used for characters aswell! Just use it with a strength of 0.6 or less! I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/star_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-27T22:54:12+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/star_style/resolve/main/star_showcase.png", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-27T22:57:17+00:00
0b0848e5cc8d2b0c180ad4de151c6450f84183ab
# Dataset Card for "4096_filtered_base_code_review" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/4096_filtered_base_code_review
[ "region:us" ]
2022-12-27T23:47:12+00:00
{"dataset_info": {"features": [{"name": "body", "dtype": "string"}, {"name": "comments", "list": [{"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "body", "dtype": "string"}]}, {"name": "answers", "list": [{"name": "body", "dtype": "string"}, {"name": "comments", "list": [{"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "body", "dtype": "string"}]}, {"name": "meta_data", "struct": [{"name": "CommentCount", "dtype": "string"}, {"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "ParentId", "dtype": "string"}, {"name": "Score", "dtype": "string"}]}]}, {"name": "meta_data", "struct": [{"name": "AcceptedAnswerId", "dtype": "string"}, {"name": "CommentCount", "dtype": "string"}, {"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "Tags", "sequence": "string"}, {"name": "Title", "dtype": "string"}]}, {"name": "question_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 206395804, "num_examples": 37026}], "download_size": 106795288, "dataset_size": 206395804}}
2022-12-28T00:22:34+00:00
5f248d88da38b4a226c76f01aee81bebaac75632
A collection of emulated 2D noisy images, a clean and noisy image in pairs. <br> 256 x 256 x 1 <br> Octaves: 4 <br> Weight: 30
SinonTM/Synth-Nav
[ "task_categories:feature-extraction", "annotations_creators:machine-generated", "language_creators:machine-generated", "size_categories:10K<n<100K", "source_datasets:original", "license:gpl-3.0", "region:us" ]
2022-12-28T01:11:04+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": [], "license": ["gpl-3.0"], "multilinguality": [], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["feature-extraction"], "task_ids": [], "pretty_name": "GNGIDS", "tags": []}
2023-01-30T19:45:23+00:00
dee66c8281e8162aae3f854083cb2c1e21f069e7
# Dataset Card for "2048_has_code_filtered_base_code_review" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/2048_has_code_filtered_base_code_review
[ "region:us" ]
2022-12-28T02:43:32+00:00
{"dataset_info": {"features": [{"name": "body", "dtype": "string"}, {"name": "comments", "list": [{"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "body", "dtype": "string"}]}, {"name": "answers", "list": [{"name": "body", "dtype": "string"}, {"name": "comments", "list": [{"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "body", "dtype": "string"}]}, {"name": "meta_data", "struct": [{"name": "CommentCount", "dtype": "string"}, {"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "ParentId", "dtype": "string"}, {"name": "Score", "dtype": "string"}]}]}, {"name": "meta_data", "struct": [{"name": "AcceptedAnswerId", "dtype": "string"}, {"name": "CommentCount", "dtype": "string"}, {"name": "ContentLicense", "dtype": "string"}, {"name": "CreationDate", "dtype": "string"}, {"name": "Id", "dtype": "string"}, {"name": "Score", "dtype": "string"}, {"name": "Tags", "sequence": "string"}, {"name": "Title", "dtype": "string"}]}, {"name": "question_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 168922714, "num_examples": 30898}], "download_size": 87127135, "dataset_size": 168922714}}
2022-12-28T16:32:52+00:00
5545141baf0d57257cb2032ecb1040ebbba058c9
alpaco_4
com0040/ai-hub_sum
[ "region:us" ]
2022-12-28T03:14:29+00:00
{}
2022-12-28T04:33:26+00:00
b8f7d168b6f4e95b2a92e84768bd6c955bed2f29
# Dataset Card for Summarize from Feedback ## Dataset Description In the [Learning to Summarize from Human Feedback paper](https://arxiv.org/abs/2009.01325), a reward model was trained from human feedback. The reward model was then used to train a summarization model to align with human preferences. This is the dataset of human feedback that was released for reward modelling. There are two parts of this dataset: `comparisons` and `axis`. In the `comparisons` part, human annotators were asked to choose the best out of two summaries. In the `axis` part, human annotators gave scores on a likert scale for the quality of a summary. The `comparisons` part only has a train and validation split, and the `axis` part only has a test and validation split. The summaries used for training the reward model in the paper come from the TL;DR dataset. Additional validation and test data come from the TL;DR dataset, CNN articles, and Daily Mail articles. For more information, see the repo [here](https://github.com/openai/summarize-from-feedback#human-feedback-data). ## Citation Information [https://arxiv.org/abs/2009.01325](https://arxiv.org/abs/2009.01325) ``` @inproceedings{stienon2020learning, author = {Nisan Stiennon and Long Ouyang and Jeff Wu and Daniel M. Ziegler and Ryan Lowe and Chelsea Voss and Alec Radford and Dario Amodei and Paul Christiano}, title = {Learning to summarize from human feedback}, booktitle = {NeurIPS}, year = 2020, } ``` Dataset added to the Hugging Face Hub with help from [@Tristan](https://huggingface.co/Tristan)
openai/summarize_from_feedback
[ "arxiv:2009.01325", "region:us" ]
2022-12-28T03:42:47+00:00
{"pretty_name": "Summarize from Feedback"}
2023-01-03T16:55:41+00:00
18722059217813fd636c2e2f4b3cc6a508ab47fd
# Dataset Card for "dreambooth-hackathon-images-srkman" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Xhaheen/dreambooth-hackathon-images-srkman
[ "region:us" ]
2022-12-28T03:57:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 4082680.0, "num_examples": 20}], "download_size": 4081453, "dataset_size": 4082680.0}}
2022-12-28T03:57:13+00:00
8e08ea0dda44a5c942164865f3c2fc10f0e476ab
<div align="center"> <img width="640" alt="keremberke/valorant-object-detection" src="https://huggingface.co/datasets/keremberke/valorant-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['dropped spike', 'enemy', 'planted spike', 'teammate'] ``` ### Number of Images ```json {'valid': 1983, 'train': 6927, 'test': 988} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/valorant-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/daniels-magonis-0pjzx/valorant-9ufcp/dataset/3](https://universe.roboflow.com/daniels-magonis-0pjzx/valorant-9ufcp/dataset/3?ref=roboflow2huggingface) ### Citation ``` @misc{ valorant-9ufcp_dataset, title = { valorant Dataset }, type = { Open Source Dataset }, author = { Daniels Magonis }, howpublished = { \\url{ https://universe.roboflow.com/daniels-magonis-0pjzx/valorant-9ufcp } }, url = { https://universe.roboflow.com/daniels-magonis-0pjzx/valorant-9ufcp }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-01-27 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on December 22, 2022 at 5:10 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 9898 images. Planted are annotated in COCO format. The following pre-processing was applied to each image: * Resize to 416x416 (Stretch) No image augmentation techniques were applied.
keremberke/valorant-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "region:us" ]
2022-12-28T05:41:05+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-01-27T13:45:00+00:00
6c452a88281eac5504f0fa5344d468ab2f731cf4
alexandreteles/mental-health-conversational-data
[ "license:other", "region:us" ]
2022-12-28T06:03:01+00:00
{"license": "other", "dataset_info": {"features": [{"name": "Context", "dtype": "string"}, {"name": "Knowledge", "dtype": "string"}, {"name": "Response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 79696, "num_examples": 661}], "download_size": 21508, "dataset_size": 79696}}
2022-12-28T06:19:29+00:00
0c8e46cbfe8edf71e592f495face94ba22155b46
### Roboflow Dataset Page https://universe.roboflow.com/ashish-cuamw/test-y7rj3 ### Citation ``` @misc{ test-y7rj3_dataset, title = { test Dataset }, type = { Open Source Dataset }, author = { ashish }, howpublished = { \\url{ https://universe.roboflow.com/ashish-cuamw/test-y7rj3 } }, url = { https://universe.roboflow.com/ashish-cuamw/test-y7rj3 }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { oct }, note = { visited on 2022-12-28 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on December 26, 2022 at 10:13 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 4666 images. T are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) No image augmentation techniques were applied.
fcakyon/gun-object-detection
[ "task_categories:object-detection", "roboflow", "region:us" ]
2022-12-28T06:20:48+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow"]}
2022-12-28T06:22:36+00:00
57f666aba71e625f54419982f4e0fadb670a5be6
# Dataset Card for "beats" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
taejunkim/beats
[ "region:us" ]
2022-12-28T06:50:28+00:00
{"dataset_info": {"features": [{"name": "mix_id", "dtype": "string"}, {"name": "beats", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 1479883, "num_examples": 13}], "download_size": 1119868, "dataset_size": 1479883}}
2022-12-28T06:50:44+00:00
c724f29df119603ea08b378189ae14f46c690fd6
hellosimple/dataset-demo
[ "license:mit", "region:us" ]
2022-12-28T07:18:49+00:00
{"license": "mit"}
2022-12-28T07:19:37+00:00
6bf472660d9a2fe3b880326cceaa0a345a98e2b4
# Dataset Card for "alignments" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
taejunkim/alignments
[ "region:us" ]
2022-12-28T07:48:57+00:00
{"dataset_info": {"features": [{"name": "mix_id", "dtype": "string"}, {"name": "track_id", "dtype": "string"}, {"name": "case_name", "dtype": "string"}, {"name": "feature", "dtype": "string"}, {"name": "metric", "dtype": "string"}, {"name": "key_change", "dtype": "int64"}, {"name": "match_rate", "dtype": "float64"}, {"name": "match_rate_raw", "dtype": "float64"}, {"name": "matched_beats", "dtype": "int64"}, {"name": "matched_beats_raw", "dtype": "int64"}, {"name": "matched_time_mix", "dtype": "float64"}, {"name": "matched_time_track", "dtype": "float64"}, {"name": "mix_cue_in_beat", "dtype": "float64"}, {"name": "mix_cue_out_beat", "dtype": "float64"}, {"name": "track_cue_in_beat", "dtype": "float64"}, {"name": "track_cue_out_beat", "dtype": "float64"}, {"name": "mix_cue_in_time", "dtype": "float64"}, {"name": "mix_cue_out_time", "dtype": "float64"}, {"name": "track_cue_in_time", "dtype": "float64"}, {"name": "track_cue_out_time", "dtype": "float64"}, {"name": "cost", "dtype": "float64"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "wp", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train", "num_bytes": 22961341, "num_examples": 6600}], "download_size": 3089520, "dataset_size": 22961341}}
2022-12-28T07:49:17+00:00
609fe5465148b8b1dde33dd01c085be2fc4f4a07
shader123/123test
[ "license:afl-3.0", "region:us" ]
2022-12-28T08:15:19+00:00
{"license": "afl-3.0"}
2022-12-28T08:15:39+00:00
c5ea3f8bc1df7c02ab2516cf1aeff73a6f3b71ec
# Dataset Card for "SmokeFire" Wildfires or forest fires are unpredictable catastrophic and destructive events that affect rural areas. The impact of these events affects both vegetation and wildlife. This dataset can be used to train networks able to detect smoke and/or fire in forest environments. ## Data Sources & Description - **This dataset consist of sample from two datasets hosted on Kaggle:** - [Forest Fire](https://www.kaggle.com/datasets/kutaykutlu/forest-fire?select=train_fire) - [Forest Fire Images](https://www.kaggle.com/datasets/mohnishsaiprasad/forest-fire-images) - **The datasets consist of:** - 2525 **Fire** samples - 2525 **Smoke** samples - 2525 **Normal** samples - **The dataset is splitted into:** - Train Set -> 6060 samples - Validation Set -> 756 samples - Test Set -> 759 samples
EdBianchi/SmokeFire
[ "region:us" ]
2022-12-28T09:21:45+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Fire", "1": "Normal", "2": "Smoke"}}}}], "splits": [{"name": "train", "num_bytes": 166216842.46, "num_examples": 6060}, {"name": "test", "num_bytes": 89193578.0, "num_examples": 759}, {"name": "validation", "num_bytes": 75838884.0, "num_examples": 756}], "download_size": 890673915, "dataset_size": 331249304.46000004}}
2022-12-29T14:45:31+00:00
89e629172b5896f83ce0c0f0a7d0dc88179d373b
# Dataset Card for "kira-dog" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fabiochiu/kira-dog
[ "region:us" ]
2022-12-28T09:33:51+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1300183.0, "num_examples": 5}], "download_size": 1301094, "dataset_size": 1300183.0}}
2022-12-28T09:41:49+00:00
60208e25d934da68e9f0e0b69f4e71354b62c4a7
# Introduction Face Synthetics dataset is a collection of diverse synthetic face images with ground truth labels. It was introduced in our paper Fake It Till You Make It: Face analysis in the wild using synthetic data alone. Our dataset contains: 100,000 images of faces at 512 x 512 pixel resolution 70 standard facial landmark annotations per-pixel semantic class anotations It can be used to train machine learning systems for face-related tasks such as landmark localization and face parsing, showing that synthetic data can both match real data in accuracy as well as open up new approaches where manual labelling would be impossible. Some images also include hands and off-center distractor faces in addition to primary faces centered in the image. The Face Synthetics dataset can be used for non-commercial research, and is licensed under the license found in LICENSE.txt. # Dataset Layout The Face Synthetics dataset is a single .zip file containing color images, segmentation images, and 2D landmark coordinates in a text file. ```markdown dataset.zip | |- {frame_id}.png # Rendered image of a face |- {frame_id}_seg.pmg # Segmentation image |- {frame_id}_ldmks.txt # Landmark annotations for 70 facial landmarks (x,y) ``` # Download A small subset of the original dataset can be found here; in order to train models in the entire dataset, please refer to [Microsoft original repo](https://github.com/microsoft/FaceSynthetics).
hedrergudene/FakeItTillYouMakeIt
[ "region:us" ]
2022-12-28T11:20:38+00:00
{}
2022-12-28T11:34:26+00:00
837bb77ac540e7716d56c1871afd872fdd829d14
muhammedplu/turkish-needle-laces
[ "license:afl-3.0", "region:us" ]
2022-12-28T11:31:54+00:00
{"license": "afl-3.0"}
2022-12-28T11:31:56+00:00
e36371eed2075cb024898054fd380b9bb4577da4
# Dataset Card for "processed_roberta_EHR_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/processed_roberta_EHR_dataset
[ "region:us" ]
2022-12-28T11:55:43+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 73729188.0, "num_examples": 23907}, {"name": "test", "num_bytes": 18414564.0, "num_examples": 5971}], "download_size": 23660173, "dataset_size": 92143752.0}}
2022-12-28T14:56:25+00:00
4113b6719a283cc09af083532482f2fb5b71ab99
# Dataset Card for "agnews_weak_labeling" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
argilla/agnews_weak_labeling
[ "language:en", "region:us" ]
2022-12-28T14:16:31+00:00
{"language": "en", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "dtype": "null"}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "null"}, {"name": "metadata", "struct": [{"name": "split", "dtype": "string"}]}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "null"}, {"name": "metrics", "dtype": "null"}, {"name": "vectors", "struct": [{"name": "mini-lm-sentence-transformers", "sequence": "float64"}]}], "splits": [{"name": "train", "num_bytes": 25212139, "num_examples": 7000}], "download_size": 20872343, "dataset_size": 25212139}}
2023-07-13T10:46:28+00:00
3d0f8f134f164d09cc19d6f8f532195fc7401dc2
fmattera/couch_ikea
[ "license:afl-3.0", "region:us" ]
2022-12-28T14:26:06+00:00
{"license": "afl-3.0"}
2022-12-28T15:02:11+00:00
1112b92c47883725b24b2de33dfe38ddfae686e3
maavangent/inkmvgnt
[ "license:cc", "region:us" ]
2022-12-28T14:52:50+00:00
{"license": "cc"}
2022-12-28T14:53:46+00:00
606ad92803e43a887e4d57b71a2ca8a61075ce39
# Dataset Card for "test-16722377061524" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
albertvillanova/bad-request
[ "region:us" ]
2022-12-28T14:57:24+00:00
{"dataset_info": {"features": [{"name": "x", "dtype": "int64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 48, "num_examples": 3}], "download_size": 950, "dataset_size": 48}}
2022-12-28T14:57:56+00:00
f8f76ee688abff1197ccc21a305e9c22c5a195f7
# Dataset Card for "ehr-roberta-tokenized_datasets-12-2022" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/ehr-roberta-tokenized_datasets-12-2022
[ "region:us" ]
2022-12-28T16:05:47+00:00
{"dataset_info": {"features": [{"name": "text", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2740313917.0, "num_examples": 1067816}, {"name": "test", "num_bytes": 144100283.0, "num_examples": 56152}], "download_size": 478603077, "dataset_size": 2884414200.0}}
2022-12-28T16:30:47+00:00
96861cc20719df721e7300949c0817e853725274
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images
[ "region:us" ]
2022-12-28T16:06:54+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3505542.0, "num_examples": 17}], "download_size": 3500492, "dataset_size": 3505542.0}}
2022-12-28T16:07:01+00:00
4e34116fbb006cb6a8a0560d28801b21be2c603c
successor/qrl-docs
[ "license:mit", "region:us" ]
2022-12-28T16:12:54+00:00
{"license": "mit"}
2022-12-28T18:24:28+00:00
5855fc5968b3b9db7b1cabb465646acd91bc7c67
### imagenette-160px-facebook-convnext-tiny-224.mk.tar.gz ```python data = mk.get("imagenette", version="160px") df = mk.DataFrame.read("https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/imagenette-160px-facebook-convnext-tiny-224.mk.tar.gz") df = data.merge(df[["img_id", "logits", "pred"]], on="img_id") ```
meerkat-ml/meerkat-dataframes
[ "region:us" ]
2022-12-28T16:17:09+00:00
{}
2023-03-12T03:48:46+00:00
2469f5c61089862475618b921e5690297bccaa21
# Dataset Card for "bookcorpus_deduplicated" ## Dataset Summary This is a deduplicated version of the original [Book Corpus dataset](https://huggingface.co/datasets/bookcorpus). The Book Corpus (Zhu et al., 2015), which was used to train popular models such as BERT, has a substantial amount of exact-duplicate documents according to [Bandy and Vincent (2021)](https://arxiv.org/abs/2105.05241) [Bandy and Vincent (2021)](https://arxiv.org/abs/2105.05241) find that thousands of books in BookCorpus are duplicated, with only 7,185 unique books out of 11,038 total. Effect of deduplication - Num of lines: 38832894 VS 74004228 - Dataset size: 2.91GB VS 4.63GB The duplicate text has been droped and only the first appearance is kept. The order of text appearance is kept. ## Why deduplicate? Deduplication of training data has showed various advantages, including: - require fewer training steps to achieve the same or better accuracy - train models that emit memorized text ten times less frequently - reduce carbon emission and energy consumption cf [Deduplicating Training Data Makes Language Models Better](https://arxiv.org/abs/2107.06499) ## Deduplication script ```python import pandas as pd from datasets import load_dataset dataset = load_dataset("bookcorpus")["train"]["text"] df = pd.Dataframe({"text":dataset}) # drop duplicates(exact match) df_filtered = df["text"].drop_duplicates() df_filtered.to_csv("bookcorpus_filtered.csv","index"=False,"header"=False) new_dataset = load_dataset("text",data_files={"train":"bookcorpus_filtered.csv"}) ``` The running time is short, less than several minutes. 
More sophicated deduplication algorithms can be applied to improve the performance, such as https://github.com/google-research/deduplicate-text-datasets ## Reference ```bib @misc{https://doi.org/10.48550/arxiv.2105.05241, doi = {10.48550/ARXIV.2105.05241}, url = {https://arxiv.org/abs/2105.05241}, author = {Bandy, Jack and Vincent, Nicholas}, keywords = {Computation and Language (cs.CL), Computers and Society (cs.CY), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Addressing "Documentation Debt" in Machine Learning Research: A Retrospective Datasheet for BookCorpus}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` ```bib @misc{https://doi.org/10.48550/arxiv.2107.06499, doi = {10.48550/ARXIV.2107.06499}, url = {https://arxiv.org/abs/2107.06499}, author = {Lee, Katherine and Ippolito, Daphne and Nystrom, Andrew and Zhang, Chiyuan and Eck, Douglas and Callison-Burch, Chris and Carlini, Nicholas}, keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Deduplicating Training Data Makes Language Models Better}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` ```bib @misc{https://doi.org/10.48550/arxiv.2209.00099, doi = {10.48550/ARXIV.2209.00099}, url = {https://arxiv.org/abs/2209.00099}, author = {Treviso, Marcos and Ji, Tianchu and Lee, Ji-Ung and van Aken, Betty and Cao, Qingqing and Ciosici, Manuel R. and Hassid, Michael and Heafield, Kenneth and Hooker, Sara and Martins, Pedro H. and Martins, André F. T. 
and Milder, Peter and Raffel, Colin and Simpson, Edwin and Slonim, Noam and Balasubramanian, Niranjan and Derczynski, Leon and Schwartz, Roy}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Methods for Natural Language Processing: A Survey}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_deduplicated
[ "arxiv:2105.05241", "arxiv:2107.06499", "arxiv:2209.00099", "region:us" ]
2022-12-28T16:41:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2867856394, "num_examples": 38832894}], "download_size": 1794567875, "dataset_size": 2867856394}}
2022-12-29T16:24:22+00:00
0510ee55922a550615bf58c10d65c7856f534863
# Dataset Card for "EHR-roberta-base-tokenized-dataset-2022-12" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/EHR-roberta-base-tokenized-dataset-2022-12
[ "region:us" ]
2022-12-28T16:48:58+00:00
{"dataset_info": {"features": [{"name": "text", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2738869059.0, "num_examples": 1067255}, {"name": "test", "num_bytes": 145534885.0, "num_examples": 56709}], "download_size": 477844625, "dataset_size": 2884403944.0}}
2022-12-28T16:50:39+00:00
beabfde25ee24531ac93f03f317e737c7ec45945
|Dataset|Bytes|Samples|Capping| |-------|-----|-------|-------| |[Unnatural Instructions](https://huggingface.co/datasets/mrm8488/unnatural-instructions-full) | 27M | 66010 | / | |[Big-Bench](https://huggingface.co/datasets/bigbench) | 1.7G | 2631238| / | |[FLAN](https://huggingface.co/datasets/Muennighoff/flan) | 3.1G | 3354260 | [30K examples per dataset max with 10 templates total (So 3K / template)](https://github.com/Muennighoff/FLAN/blob/main/flan/tasks.py) | |[SuperNatural-Instructions](https://huggingface.co/datasets/Muennighoff/natural-instructions) | 7.4G | 7101558 | / | |[StackOverflow](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_titlebody_best_voted_answer_jsonl) | 9.0G | 4730542 | / | |[xP3-EN](https://huggingface.co/datasets/bigscience/xP3) | 37G | 31495184 | [100K examples per data subset per prompt allowed (So 100K / template)](https://github.com/bigscience-workshop/bigscience/blob/e848657707a549dda35c8b3cc63a96d2064b2983/data/xp3/prepare_xp3_train.py#L15) | |Total|58GB|49378792|
taskydata/realtasky
[ "language:en", "region:us" ]
2022-12-28T16:55:33+00:00
{"language": ["en"]}
2023-03-22T10:46:54+00:00
33e763e952201323d99c5afe93766a5697b31b38
aashsach/multiconer2
[ "region:us" ]
2022-12-28T17:03:44+00:00
{"dataset_info": [{"config_name": "bn", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 3844480, "num_examples": 9708}, {"name": "validation", "num_bytes": 199756, "num_examples": 507}], "download_size": 4017205, "dataset_size": 4044236}, {"config_name": "de", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": 
{"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 2724923, "num_examples": 9785}, {"name": "validation", "num_bytes": 137726, "num_examples": 512}], "download_size": 2831813, "dataset_size": 2862649}, {"config_name": "en", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", 
"8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4448839, "num_examples": 16778}, {"name": "validation", "num_bytes": 232735, "num_examples": 871}], "download_size": 4575462, "dataset_size": 4681574}, {"config_name": "es", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": 
"B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4643093, "num_examples": 16453}, {"name": "validation", "num_bytes": 237306, "num_examples": 854}], "download_size": 4659064, "dataset_size": 4880399}, {"config_name": "fa", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": 
"I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 5861165, "num_examples": 16321}, {"name": "validation", "num_bytes": 316929, "num_examples": 855}], "download_size": 5760501, "dataset_size": 6178094}, {"config_name": "fr", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": 
"B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4375159, "num_examples": 16548}, {"name": "validation", "num_bytes": 229499, "num_examples": 857}], "download_size": 4492163, "dataset_size": 4604658}, {"config_name": "hi", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": 
"I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4039051, "num_examples": 9632}, {"name": "validation", "num_bytes": 217741, "num_examples": 514}], "download_size": 4060184, "dataset_size": 4256792}, {"config_name": "it", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", 
"51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4256854, "num_examples": 16579}, {"name": "validation", "num_bytes": 219489, "num_examples": 858}], "download_size": 4454712, "dataset_size": 4476343}, {"config_name": "pt", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", 
"60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 4587908, "num_examples": 16469}, {"name": "validation", "num_bytes": 233471, "num_examples": 854}], "download_size": 4622334, "dataset_size": 4821379}, {"config_name": "sv", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", 
"num_bytes": 3919442, "num_examples": 16363}, {"name": "validation", "num_bytes": 205910, "num_examples": 856}], "download_size": 4100785, "dataset_size": 4125352}, {"config_name": "uk", "features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 5104234, "num_examples": 16429}, {"name": "validation", "num_bytes": 261125, "num_examples": 851}], "download_size": 5245683, "dataset_size": 5365359}, {"config_name": "zh", 
"features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-AerospaceManufacturer", "2": "I-AerospaceManufacturer", "3": "B-AnatomicalStructure", "4": "I-AnatomicalStructure", "5": "B-ArtWork", "6": "I-ArtWork", "7": "B-Artist", "8": "I-Artist", "9": "B-Athlete", "10": "I-Athlete", "11": "B-CarManufacturer", "12": "I-CarManufacturer", "13": "B-Cleric", "14": "I-Cleric", "15": "B-Clothing", "16": "I-Clothing", "17": "B-Disease", "18": "I-Disease", "19": "B-Drink", "20": "I-Drink", "21": "B-Facility", "22": "I-Facility", "23": "B-Food", "24": "I-Food", "25": "B-HumanSettlement", "26": "I-HumanSettlement", "27": "B-MedicalProcedure", "28": "I-MedicalProcedure", "29": "B-Medication/Vaccine", "30": "I-Medication/Vaccine", "31": "B-MusicalGRP", "32": "I-MusicalGRP", "33": "B-MusicalWork", "34": "I-MusicalWork", "35": "B-ORG", "36": "I-ORG", "37": "B-OtherLOC", "38": "I-OtherLOC", "39": "B-OtherPER", "40": "I-OtherPER", "41": "B-OtherPROD", "42": "I-OtherPROD", "43": "B-Politician", "44": "I-Politician", "45": "B-PrivateCorp", "46": "I-PrivateCorp", "47": "B-PublicCorp", "48": "I-PublicCorp", "49": "B-Scientist", "50": "I-Scientist", "51": "B-Software", "52": "I-Software", "53": "B-SportsGRP", "54": "I-SportsGRP", "55": "B-SportsManager", "56": "I-SportsManager", "57": "B-Station", "58": "I-Station", "59": "B-Symptom", "60": "I-Symptom", "61": "B-Vehicle", "62": "I-Vehicle", "63": "B-VisualWork", "64": "I-VisualWork", "65": "B-WrittenWork", "66": "I-WrittenWork"}}}}], "splits": [{"name": "train", "num_bytes": 3816980, "num_examples": 9759}, {"name": "validation", "num_bytes": 198669, "num_examples": 506}], "download_size": 3935986, "dataset_size": 4015649}]}
2023-01-05T03:00:49+00:00
332ffe76d488b894416602afc03392070d601394
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images-2
[ "region:us" ]
2022-12-28T17:11:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3482571.0, "num_examples": 24}], "download_size": 3481016, "dataset_size": 3482571.0}}
2022-12-28T17:12:00+00:00
b80388a82b7271ee201063ae57f842990ad1fc87
Hexye/discord-scams
[ "license:mit", "region:us" ]
2022-12-28T17:42:39+00:00
{"license": "mit"}
2022-12-28T17:42:39+00:00
09fc389f0dd7de7d4fc331930bfd2ce434b2b9f9
# Dataset Card for "bookcorpus_deduplicated_small" First 100K (0.25%) examples of [bookcorpus_deduplicated](https://huggingface.co/datasets/saibo/bookcorpus_deduplicated) size: 7.4MB [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_deduplicated_small
[ "region:us" ]
2022-12-28T18:55:18+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7321888, "num_examples": 100000}], "download_size": 4495653, "dataset_size": 7321888}}
2022-12-29T16:14:32+00:00
c1f19114490437e2ac682f9936ac0ca6461d76b6
adamwatters/half-dome
[ "license:openrail", "region:us" ]
2022-12-28T19:01:49+00:00
{"license": "openrail"}
2022-12-28T19:19:46+00:00
788fb2722316ee7cad1ace2f6c94e563556a1d3e
### Roboflow Dataset Page [https://universe.roboflow.com/augmented-startups/football-player-detection-kucab](https://universe.roboflow.com/augmented-startups/football-player-detection-kucab?ref=roboflow2huggingface) ### Citation ``` @misc{ football-player-detection-kucab_dataset, title = { Football-Player-Detection Dataset }, type = { Open Source Dataset }, author = { Augmented Startups }, howpublished = { \url{ https://universe.roboflow.com/augmented-startups/football-player-detection-kucab } }, url = { https://universe.roboflow.com/augmented-startups/football-player-detection-kucab }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2022-12-29 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on November 21, 2022 at 6:50 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 1232 images. Track-players-and-football are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
keremberke/football-object-detection
[ "task_categories:object-detection", "roboflow", "region:us" ]
2022-12-28T20:09:47+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow"]}
2023-01-04T20:39:21+00:00
0d66f0f14193eeeb497776a71e21bcf0b5777cb7
lol
vukrosic/derambooth-vuk-512-images
[ "region:us" ]
2022-12-28T20:28:33+00:00
{}
2022-12-28T20:29:35+00:00
6f978bfcf9b201baad7430d5eb3fbb69782844af
patrickvonplaten/restore_punctuation_medium_num_beams_2
[ "speechbox_punc", "region:us" ]
2022-12-28T20:29:25+00:00
{"tags": ["speechbox_punc"]}
2022-12-29T09:22:49+00:00
ed873b4fc5281a659037fe97b50181545b62abdf
sinsforeal/haruhisky
[ "license:openrail", "region:us" ]
2022-12-28T20:29:29+00:00
{"license": "openrail"}
2022-12-28T20:31:07+00:00
acd3a37d1b876cd33d878ea96746264094fb6e62
patrickvonplaten/restore_punctuation_medium_num_beams_4
[ "speechbox_punc", "region:us" ]
2022-12-28T20:34:46+00:00
{"tags": ["speechbox_punc"]}
2022-12-29T09:27:10+00:00
5c3c780f639202cddad77fe8779a831a74e6b77d
patrickvonplaten/restore_punctuation_medium_num_beams_1
[ "speechbox_punc", "region:us" ]
2022-12-28T20:45:23+00:00
{"tags": ["speechbox_punc"]}
2022-12-28T21:14:09+00:00
893c727d4959743dfab76cef99651df912aaae61
patrickvonplaten/restore_punctuation_tiny_num_beams_1
[ "speechbox_punc", "region:us" ]
2022-12-28T21:09:30+00:00
{"tags": ["speechbox_punc"]}
2022-12-28T21:13:10+00:00
2d1124c71d7559fd5c10616f727541d311b1f254
patrickvonplaten/restore_punctuation_tiny_num_beams_2
[ "speechbox_punc", "region:us" ]
2022-12-28T21:11:24+00:00
{"tags": ["speechbox_punc"]}
2022-12-29T09:17:57+00:00
3133c4b64e534210df9da5f9625fc86e426932e8
patrickvonplaten/restore_punctuation_tiny_num_beams_4
[ "speechbox_punc", "region:us" ]
2022-12-28T21:12:13+00:00
{"tags": ["speechbox_punc"]}
2022-12-29T09:17:00+00:00
9ddbb4b4ad88d90d3ac7781dae58e1bf8affdddc
adamwatters/spongebob
[ "license:openrail", "region:us" ]
2022-12-28T22:02:34+00:00
{"license": "openrail"}
2022-12-28T22:08:39+00:00
acfeca643c1cd06839a782d84b8572f3d71b8e90
adamwatters/spongebobsquarepants
[ "license:openrail", "region:us" ]
2022-12-28T22:27:00+00:00
{"license": "openrail"}
2022-12-28T22:27:02+00:00
e2075fd0d605a4d8d9a9fd3b43a89474af584508
adamwatters/spongebob2
[ "license:openrail", "region:us" ]
2022-12-28T22:27:47+00:00
{"license": "openrail"}
2022-12-28T22:32:28+00:00
33232aa37505411d5d74d0e6bd31f44d7fe1ba9f
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-square" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images-square
[ "region:us" ]
2022-12-28T22:46:25+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2583462.0, "num_examples": 20}], "download_size": 2582753, "dataset_size": 2583462.0}}
2022-12-28T22:46:35+00:00
1e8155a9100872e105dccca07efa41d58dcb0d38
darcksky/treinodofilhao
[ "region:us" ]
2022-12-28T22:55:20+00:00
{}
2022-12-28T23:04:36+00:00
3374e9e6adbbcf37e3917bc754245129a7d2ef84
# Dataset Card for "medspeech" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arnepeine/medspeech
[ "region:us" ]
2022-12-28T23:41:53+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 133517.0, "num_examples": 3}], "download_size": 126401, "dataset_size": 133517.0}}
2023-01-03T10:35:40+00:00
d5468c7d96684f3339b2ef145e27df21a912f9f5
robertmyers/bpt-static
[ "license:gpl-3.0", "region:us" ]
2022-12-29T01:05:42+00:00
{"license": "gpl-3.0"}
2023-01-23T05:58:06+00:00
226a666899a74cb3867a49f724c705a125d20671
adamwatters/multijam-avatar
[ "license:openrail", "region:us" ]
2022-12-29T02:19:56+00:00
{"license": "openrail"}
2022-12-29T02:26:08+00:00
11744ece8c44cefe140dfbac6be7f2de66600523
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - jampatoisnli.github.io - **Repository:** - https://github.com/ruth-ann/jampatoisnli - **Paper:** - https://arxiv.org/abs/2212.03419 - **Point of Contact:** - Ruth-Ann Armstrong: [email protected] ### Dataset Summary JamPatoisNLI provides the first dataset for natural language inference in a creole language, Jamaican Patois. Many of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from a major world language and a distinctive grammar reflecting the languages of the original speakers and the process of language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer from large monolingual or multilingual pretrained models. 
### Supported Tasks and Leaderboards Natural language inference ### Languages Jamaican Patois ### Data Fields premise, hypothesis, label ### Data Splits Train: 250 Val: 200 Test: 200 ### Data set creation + Annotations Premise collection: 97% of examples from Twitter; remaining pulled from literature and online cultural website Hypothesis construction: For each premise, hypothesis written by native speaker (our first author) so that pair’s classification would be E, N or C Label validation: Random sample of 100 sentence pairs double annotated by fluent speakers ### Social Impact of Dataset JamPatoisNLI is a low-resource language dataset in an English-based Creole spoken in the Caribbean, Jamaican Patois. The creation of the dataset contributes to expanding the scope of NLP research to under-explored languages across the world. ### Dataset Curators [@ruth-ann](https://github.com/ruth-ann) ### Citation Information @misc{https://doi.org/10.48550/arxiv.2212.03419, doi = {10.48550/ARXIV.2212.03419}, url = {https://arxiv.org/abs/2212.03419}, author = {Armstrong, Ruth-Ann and Hewitt, John and Manning, Christopher}, keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.7}, title = {JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ### Contributions Thanks to Prof. Christopher Manning and John Hewitt for their contributions, guidance, facilitation and support related to the creation of this dataset.
Ruth-Ann/jampatoisnli
[ "task_categories:text-classification", "task_ids:natural-language-inference", "annotations_creators:expert-generated", "language_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "multilinguality:other-english-based-creole", "size_categories:n<1K", "source_datasets:original", "language:jam", "license:other", "creole", "low-resource-language", "arxiv:2212.03419", "region:us" ]
2022-12-29T05:22:50+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated", "found"], "language": ["jam"], "license": ["other"], "multilinguality": ["monolingual", "other-english-based-creole"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference"], "pretty_name": "JamPatoisNLI", "tags": ["creole", "low-resource-language"]}
2022-12-31T03:25:34+00:00
1ef34fc1b87fe5f5d99e89da477c37499d69866a
# Dataset Card for [opus] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description **Disclaimer.** Loading of dataset is slow, thus it may not be feasible when loading at scale. I'd suggest to use the other OPUS datasets on Huggingface which loads a specific corpus. Loads [OPUS](https://opus.nlpl.eu/) as HuggingFace dataset. OPUS is an open parallel corpus covering 700+ languages and 1100+ datasets. Given a `src` and `tgt` language, this repository can load *all* available parallel corpus. To my knowledge, other OPUS datasets on Huggingface loads a specific corpus **Requirements**. ``` pip install pandas # pip install my fork of `opustools` git clone https://github.com/larrylawl/OpusTools.git pip install -e OpusTools/opustools_pkg ``` **Example Usage**. ``` # args follows `opustools`: https://pypi.org/project/opustools/ src="en" tgt="id" download_dir="data" # dir to save downloaded files corpus="bible-uedin" # corpus name. 
Leave as `None` to download all available corpus for the src-tgt pair. dataset = load_dataset("larrylawl/opus", src=src, tgt=tgt, download_dir=download_dir, corpus=corpus) ) ``` **Disclaimer**. This repository is still in active development. Do make a PR if there're any issues! ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Available languages can be viewed on the [OPUS API](https://opus.nlpl.eu/opusapi/?languages=True) ## Dataset Structure ### Data Instances ``` {'src': 'In the beginning God created the heavens and the earth .', 'tgt': 'Pada mulanya , waktu Allah mulai menciptakan alam semesta'} ``` ### Data Fields ``` features = { "src": datasets.Value("string"), "tgt": datasets.Value("string"), } ``` ### Data Splits Merged all data into train split. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@larrylawl](https://larrylawl.github.io/) for adding this dataset.
larrylawl/opus
[ "task_categories:translation", "annotations_creators:expert-generated", "annotations_creators:found", "language_creators:found", "language_creators:expert-generated", "multilinguality:translation", "parallel-corpus", "region:us" ]
2022-12-29T06:08:54+00:00
{"annotations_creators": ["expert-generated", "found"], "language_creators": ["found", "expert-generated"], "license": [], "multilinguality": ["translation"], "size_categories": [], "source_datasets": [], "task_categories": ["translation"], "task_ids": [], "pretty_name": "opus", "tags": ["parallel-corpus"]}
2023-01-17T03:03:16+00:00
ad0eefef4ed9f64ec797d4d6281629062d5f4100
# Dataset Card for "beats-mixes" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/beats-mixes
[ "region:us" ]
2022-12-29T06:16:48+00:00
{"dataset_info": {"features": [{"name": "mix_id", "dtype": "string"}, {"name": "beats", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 425961256, "num_examples": 5040}], "download_size": 244903841, "dataset_size": 425961256}}
2022-12-29T06:17:19+00:00
02d61b31c87b7fb5857c86540f5c4b5fcd76bfd9
# Dataset Card for "beats-tracks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/beats-tracks
[ "region:us" ]
2022-12-29T06:17:48+00:00
{"dataset_info": {"features": [{"name": "track_id", "dtype": "string"}, {"name": "beats", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 402775482, "num_examples": 63038}], "download_size": 118607513, "dataset_size": 402775482}}
2022-12-29T06:18:17+00:00
8fb1fe1c91f7c8ac8cfc4dec4ca6ebc13a16efc5
# Dataset Card for "transitions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/transitions
[ "region:us" ]
2022-12-29T06:34:38+00:00
{"dataset_info": {"features": [{"name": "tran_id", "dtype": "string"}, {"name": "mix_id", "dtype": "string"}, {"name": "i_tran", "dtype": "int32"}, {"name": "i_track_prev", "dtype": "int32"}, {"name": "i_track_next", "dtype": "int32"}, {"name": "track_id_prev", "dtype": "string"}, {"name": "track_id_next", "dtype": "string"}, {"name": "match_rate_prev", "dtype": "float32"}, {"name": "match_rate_next", "dtype": "float32"}, {"name": "matched_beats_prev", "dtype": "int32"}, {"name": "matched_beats_next", "dtype": "int32"}, {"name": "overlap_wpts", "dtype": "int32"}, {"name": "overlap_beats", "dtype": "float32"}, {"name": "tran_wpts", "dtype": "int32"}, {"name": "extra_wpts_prev", "dtype": "int32"}, {"name": "extra_wpts_next", "dtype": "int32"}, {"name": "extra_beats_prev", "dtype": "float32"}, {"name": "extra_beats_next", "dtype": "float32"}, {"name": "last_wpt_prev", "dtype": "int32"}, {"name": "last_wpt_next", "dtype": "int32"}, {"name": "total_wpt_prev", "dtype": "int32"}, {"name": "total_wpt_next", "dtype": "int32"}, {"name": "matched_time_mix_prev", "dtype": "float32"}, {"name": "matched_time_mix_next", "dtype": "float32"}, {"name": "matched_time_track_prev", "dtype": "float32"}, {"name": "matched_time_track_next", "dtype": "float32"}, {"name": "timestamp_prev", "dtype": "float32"}, {"name": "timestamp_next", "dtype": "float32"}, {"name": "case_name_prev", "dtype": "string"}, {"name": "case_name_next", "dtype": "string"}, {"name": "feature_prev", "dtype": "string"}, {"name": "feature_next", "dtype": "string"}, {"name": "metric_prev", "dtype": "string"}, {"name": "metric_next", "dtype": "string"}, {"name": "key_change_prev", "dtype": "int32"}, {"name": "key_change_next", "dtype": "int32"}, {"name": "mix_cue_in_beat_prev", "dtype": "int32"}, {"name": "mix_cue_in_beat_next", "dtype": "int32"}, {"name": "mix_cue_out_beat_prev", "dtype": "int32"}, {"name": "mix_cue_out_beat_next", "dtype": "int32"}, {"name": "track_cue_in_beat_prev", "dtype": "int32"}, {"name": 
"track_cue_in_beat_next", "dtype": "int32"}, {"name": "track_cue_out_beat_prev", "dtype": "int32"}, {"name": "track_cue_out_beat_next", "dtype": "int32"}, {"name": "mix_cue_in_time_prev", "dtype": "float32"}, {"name": "mix_cue_in_time_next", "dtype": "float32"}, {"name": "mix_cue_out_time_prev", "dtype": "float32"}, {"name": "mix_cue_out_time_next", "dtype": "float32"}, {"name": "track_cue_in_time_prev", "dtype": "float32"}, {"name": "track_cue_in_time_next", "dtype": "float32"}, {"name": "track_cue_out_time_prev", "dtype": "float32"}, {"name": "track_cue_out_time_next", "dtype": "float32"}, {"name": "cost_prev", "dtype": "float32"}, {"name": "cost_next", "dtype": "float32"}, {"name": "wp_prev", "sequence": {"sequence": "int32"}}, {"name": "wp_next", "sequence": {"sequence": "int32"}}, {"name": "wp_raw_prev", "sequence": {"sequence": "int32"}}, {"name": "wp_raw_next", "sequence": {"sequence": "int32"}}], "splits": [{"name": "train", "num_bytes": 3980668452, "num_examples": 64748}], "download_size": 1355715395, "dataset_size": 3980668452}}
2022-12-29T06:37:52+00:00
87519c39b1886a35e6c94dd1e39211a8587cd394
# Dataset Card for XAlign ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Known Limitations](#known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [XAlign homepage](https://github.com/tushar117/XAlign) - **Repository:** [XAlign repo](https://github.com/tushar117/XAlign) - **Paper:** [XAlign: Cross-lingual Fact-to-Text Alignment and Generation for Low-Resource Languages](https://arxiv.org/abs/2202.00291) - **Leaderboard:** [Papers With Code Leaderboard for XAlign](https://paperswithcode.com/sota/data-to-text-generation-on-xalign) - **Point of Contact:** [Tushar Abhishek]([email protected]) ### Dataset Summary It consists of an extensive collection of a high quality cross-lingual fact-to-text dataset where facts are in English and corresponding sentences are in native language for person biographies. The Train & validation splits are created using distant supervision methods and Test data is generated through human annotations. 
### Supported Tasks and Leaderboards - 'Data-to-text Generation': XAlign dataset can be used to train cross-lingual data-to-text generation models. The model performance can measured through any text generation evaluation metrics by taking average across all the languages. [Sagare et al. (2022)](https://arxiv.org/abs/2209.11252) reported average BLEU score of 29.27 and average METEOR score of 53.64 over the test set. - 'Relation Extraction': XAlign could also be used for cross-lingual relation extraction where relations in English can be extracted from associated native sentence. See [Papers With Code Leaderboard](https://paperswithcode.com/sota/data-to-text-generation-on-xalign) for more models. ### Languages Assamese (as), Bengali (bn), Gujarati (gu), Hindi (hi), Kannada (kn), Malayalam (ml), Marathi (mr), Oriya (or), Punjabi (pa), Tamil (ta), Telugu (te), and English (en). ## Dataset Structure ### Data Fields Each record consist of the following entries: - sentence (string) : Native language wikipedia sentence. (non-native language strings were removed.) - `facts` (List[Dict]) : List of facts associated with the sentence where each fact is stored as dictionary. - language (string) : Language identifier. The `facts` key contains list of facts where each facts is stored as dictionary. A single record within fact list contains following entries: - subject (string) : central entity. - object (string) : entity or a piece of information about the subject. - predicate (string) : relationship that connects the subject and the object. - qualifiers (List[Dict]) : It provide additional information about the fact, is stored as list of qualifier where each record is a dictionary. The dictionary contains two keys: qualifier_predicate to represent property of qualifer and qualifier_object to store value for the qualifier's predicate. 
### Data Instances Example from English ``` { "sentence": "Mark Paul Briers (born 21 April 1968) is a former English cricketer.", "facts": [ { "subject": "Mark Briers", "predicate": "date of birth", "object": "21 April 1968", "qualifiers": [] }, { "subject": "Mark Briers", "predicate": "occupation", "object": "cricketer", "qualifiers": [] }, { "subject": "Mark Briers", "predicate": "country of citizenship", "object": "United Kingdom", "qualifiers": [] } ], "language": "en" } ``` Example from one of the low-resource languages (i.e. Hindi) ``` { "sentence": "बोरिस पास्तेरनाक १९५८ में साहित्य के क्षेत्र में नोबेल पुरस्कार विजेता रहे हैं।", "facts": [ { "subject": "Boris Pasternak", "predicate": "nominated for", "object": "Nobel Prize in Literature", "qualifiers": [ { "qualifier_predicate": "point in time", "qualifier_subject": "1958" } ] } ], "language": "hi" } ``` ### Data Splits The XAlign dataset has 3 splits: train, validation, and test. Below are the statistics the dataset. | Dataset splits | Number of Instances in Split | | --- | --- | | Train | 499155 | | Validation | 55469 | | Test | 7425 | ## Dataset Creation ### Curation Rationale Most of the existing Data-to-Text datasets are available in English. Also, the structured Wikidata entries for person entities in low resource languages are minuscule in number compared to that in English. Thus, monolingual Data-to-Text for low resource languages suffers from data sparsity. XAlign dataset would be useful in creation of cross-lingual Data-to-Text generation systems that take a set of English facts as input and generates a sentence capturing the fact-semantics in the specified language. ### Source Data #### Initial Data Collection and Normalization The dataset creation process starts with an intial list of ~95K person entities selected from Wikidata and each of which has a link to a corresponding Wikipedia page in at least one of our 11 low resource languages. 
This leads to a dataset where every instance is a tuple containing entityID, English Wikidata facts, language identifier, Wikipedia URL for the entityID. The facts (in English) are extracted from the 20201221 WikiData dump for each entity using the [WikiData](https://query.wikidata.org) APIs. The facts are gathered only for the speficied Wikidata property (or relation) types that captures most useful factual information for person entities: WikibaseItem, Time, Quantity, and Monolingualtext.This leads to overall ~0.55M data instances across all the 12 languages. Also, for each language, the sentences (along with section information) are extracted from 20210520 Wikipedia XML dump using the pre-processing steps as described [here](https://arxiv.org/abs/2202.00291). For every (entity, language) pair, the pre-processed dataset contains a set of English Wikidata facts and a set of Wikipedia sentences in that language. In order to create train and validation dataset, these are later passed through a two-stage automatic aligner as proposed in [abhishek et al. (2022)](https://arxiv.org/abs/2202.00291) to associate a sentence with a subset of facts. #### Who are the source language producers? The text are extracted from Wikipedia and facts are retrieved from Wikidata. ### Annotations #### Annotation process The Manual annotation of Test dataset was done in two phases. For both the phases, the annotators were presented with (low resource language sentence, list of English facts). They were asked to mark facts present in the given sentence. There were also specific guidelines to ignore redundant facts, handle abbreviations, etc. More detailed annotation guidelines and ethical statement are mentioned [here](https://docs.google.com/document/d/1ucGlf-Jm1ywQ_Fjw9f2UqPeMWPlBnlZA46UY7KuZ0EE/edit) . In the first phase, we got 60 instances labeled per language by a set of 8 expert annotators (trusted graduate students who understood the task very well). 
In phase 2, we selected 8 annotators per language from the [National Register of Translators](https://www.ntm.org.in/languages/english/nrtdb.aspx}). We tested these annotators using phase 1 data as golden control set, and shortlisted up to 4 annotators per language who scored highest (on Kappa score with golden annotations). #### Who are the annotators? Human annotators were selected appropriately (after screening) from [National Translation Mission](https://www.ntm.org.in) for Test set creation. ### Personal and Sensitive Information The dataset does not involve collection or storage of any personally identifiable information or offensive information at any stage. ## Considerations for Using the Data ### Social Impact of Dataset The purpose of the this dataset is to help develop cross-lingual Data-to-Text generation systems that are vital in many downstream Natural Language Processing (NLP) applications like automated dialog systems, domain-specific chatbots, open domain question answering, authoring sports reports, etc. These systems will be useful for powering business applications like Wikipedia text generation given English Infoboxes, automated generation of non-English product descriptions using English product attributes, etc. ### Known Limitations The XAlign dataset focus only on person biographies and system developed on this dataset might not be generalized to other domains. ## Additional Information ### Dataset Curators This dataset is collected by Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma of Information Retrieval and Extraction Lab (IREL), Hyderabad, India. They released [scripts](https://github.com/tushar117/xalign) to collect and process the data into the Data-to-Text format. ### Licensing Information The XAlign dataset is released under the [MIT License](https://github.com/tushar117/XAlign/blob/main/LICENSE). 
### Citation Information ``` @article{abhishek2022xalign, title={XAlign: Cross-lingual Fact-to-Text Alignment and Generation for Low-Resource Languages}, author={Abhishek, Tushar and Sagare, Shivprasad and Singh, Bhavyajeet and Sharma, Anubhav and Gupta, Manish and Varma, Vasudeva}, journal={arXiv preprint arXiv:2202.00291}, year={2022} } ``` ### Contributions Thanks to [Tushar Abhishek](https://github.com/tushar117), [Shivprasad Sagare](https://github.com/ShivprasadSagare), [Bhavyajeet Singh](https://github.com/bhavyajeet), [Anubhav Sharma](https://github.com/anubhav-sharma13), [Manish Gupta](https://github.com/blitzprecision) and [Vasudeva Varma]([email protected]) for adding this dataset. Additional thanks to the annotators from National Translation Mission for their crucial contributions to creation of the test dataset: Bhaswati Bhattacharya, Aditi Sarkar, Raghunandan B. S., Satish M., Rashmi G.Rao, Vidyarashmi PN, Neelima Bhide, Anand Bapat, Krishna Rao N V, Nagalakshmi DV, Aditya Bhardwaj Vuppula, Nirupama Patel, Asir. T, Sneha Gupta, Dinesh Kumar, Jasmin Gilani, Vivek R, Sivaprasad S, Pranoy J, Ashutosh Bharadwaj, Balaji Venkateshwar, Vinkesh Bansal, Vaishnavi Udyavara, Ramandeep Singh, Khushi Goyal, Yashasvi LN Pasumarthy and Naren Akash.
tushar117/xalign
[ "task_categories:table-to-text", "task_ids:rdf-to-text", "annotations_creators:found", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:100K<n<1M", "source_datasets:original", "language:as", "language:bn", "language:gu", "language:hi", "language:kn", "language:ml", "language:mr", "language:or", "language:pa", "language:ta", "language:te", "language:en", "license:cc-by-nc-sa-4.0", "license:mit", "xalign", "NLG", "low-resource", "LRL", "arxiv:2202.00291", "arxiv:2209.11252", "region:us" ]
2022-12-29T06:50:10+00:00
{"annotations_creators": ["found"], "language_creators": ["crowdsourced"], "language": ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te", "en"], "license": ["cc-by-nc-sa-4.0", "mit"], "multilinguality": ["multilingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["table-to-text"], "task_ids": ["rdf-to-text"], "paperswithcode_id": "xalign", "pretty_name": "XAlign", "configs": ["release_v1"], "tags": ["xalign", "NLG", "low-resource", "LRL"]}
2023-01-01T20:39:30+00:00
5d5e4a5187d8f54ace4d58e275f9df4cccc3ff59
# Naver 영화 평점 데이터셋
Blpeng/nsmc
[ "region:us" ]
2022-12-29T07:18:03+00:00
{}
2022-12-29T07:27:12+00:00
77828c01279313d129743cbc8bdc71f6931873eb
<div align="center"> <img width="640" alt="keremberke/csgo-object-detection" src="https://huggingface.co/datasets/keremberke/csgo-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['ct', 'cthead', 't', 'thead'] ``` ### Number of Images ```json {'train': 3879, 'valid': 383, 'test': 192} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/csgo-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/asd-culfr/wlots/dataset/1](https://universe.roboflow.com/asd-culfr/wlots/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ wlots_dataset, title = { wlots Dataset }, type = { Open Source Dataset }, author = { asd }, howpublished = { \\url{ https://universe.roboflow.com/asd-culfr/wlots } }, url = { https://universe.roboflow.com/asd-culfr/wlots }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { may }, note = { visited on 2023-01-27 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on December 28, 2022 at 8:08 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 4454 images. Ct-cthead-t-thead are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Fill (with center crop)) The following augmentation was applied to create 3 versions of each source image: * Random brigthness adjustment of between -15 and +15 percent
keremberke/csgo-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "region:us" ]
2022-12-29T07:37:55+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-01-27T13:39:19+00:00
39db12a85bcbf6eba606c22c96a06577b1e69acf
teradakokoro/open
[ "region:us" ]
2022-12-29T07:56:01+00:00
{}
2023-08-22T03:44:15+00:00
57aa02d263a6a82015660874b25765e58cb765be
# Dataset Card for reddit_one_ups_2014 ## Dataset Description - **Homepage:** https://github.com/Georeactor/reddit-one-ups ### Dataset Summary Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This task makes one-ups easier by focusing on a set of common, often meme-like replies (e.g. 'yes', 'nope', '(͡°͜ʖ͡°)'). For commentary on predictions with a previous version of the dataset, see https://blog.goodaudience.com/can-deepclapback-learn-when-to-lol-e4a2092a8f2c For unique / non-meme seq2seq version of this dataset, see https://huggingface.co/datasets/georeactor/reddit_one_ups_seq2seq_2014 Replies were selected from PushShift's archive of posts from 2014. ### Supported Tasks Text classification task: finding the common reply (out of ~37) to match the parent comment text. Text prediction task: estimating the vote score, or parent:reply ratio, of a meme response, as a measure of relevancy/cleverness of reply. ### Languages Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ) ## Dataset Structure ### Data Instances 29,375 rows ### Data Fields - id: the Reddit alphanumeric ID for the reply - body: the content of the original reply - score: the net vote score of the original reply - parent_id: the Reddit alphanumeric ID for the parent - author: the Reddit username of the reply - subreddit: the Reddit community where the discussion occurred - parent_score: the net vote score of the parent comment - cleantext: the simplified reply (one of 37 classes) - tstamp: the timestamp of the reply - parent_body: the content of the original parent ## Dataset Creation ### Source Data Reddit comments collected through PushShift.io archives for 2014. #### Initial Data Collection and Normalization - Removed deleted or empty comments. - Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score. - Found the top/repeating phrases common to these one-ups/clapback comments. 
- Selected only replies which had one of these top/repeating phrases. - Made rows in PostgreSQL and output as CSV. ## Considerations for Using the Data Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links! - You can use the subreddit and score columns to filter content. - Imbalanced dataset: replies 'yes' and 'no' are more common than others. - Overlap of labels: replies such as 'yes', 'yep', and 'yup' serve similar purposes; in other cases 'no' vs. 'nope' may be interesting. - Timestamps: the given timestamp may help identify trends in meme replies - Usernames: a username was included to identify the 'username checks out' meme, but this was not common enough in 2014, and the included username is from the reply. Reddit comments are properties of Reddit and comment owners using their Terms of Service.
georeactor/reddit_one_ups_2014
[ "task_categories:text-classification", "language:en", "reddit", "not-for-all-eyes", "not-for-all-audiences", "region:us" ]
2022-12-29T08:23:42+00:00
{"language": "en", "task_categories": ["text-classification"], "tags": ["reddit", "not-for-all-eyes", "not-for-all-audiences"]}
2023-03-28T21:02:40+00:00
48a29533105e0f6bd59070cdc9ceda2723183d5c
# Dataset Card for reddit_one_ups_seq2seq_2014 ## Dataset Description - **Homepage:** https://github.com/Georeactor/reddit-one-ups ### Dataset Summary Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This dataset chose freeform replies, which did not follow repetitive meme replies. The IAmA subreddit was excluded to avoid an issue where their answers frequently score higher than questions. For commentary on predictions with a previous version of the dataset, see https://blog.goodaudience.com/can-deepclapback-learn-when-to-lol-e4a2092a8f2c For meme / text-classification version of this dataset, see https://huggingface.co/datasets/georeactor/reddit_one_ups_2014 Replies were selected from PushShift's archive of posts from 2014. ### Supported Tasks seq2seq writing of replies to Reddit comments ### Languages Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ) ## Dataset Structure ### Data Instances 19,992 rows ### Data Fields - id: the Reddit alphanumeric ID for the reply - body: the content of the original reply - score: the net vote score of the original reply - parent_id: the Reddit alphanumeric ID for the parent - author: the Reddit username of the reply - subreddit: the Reddit community where the discussion occurred - parent_score: the net vote score of the parent comment - tstamp: the timestamp of the reply - parent_body: the content of the original parent ## Dataset Creation ### Source Data Reddit comments collected through PushShift.io archives for 2014. #### Initial Data Collection and Normalization - Removed deleted or empty comments. - Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score. - Found top/repeating phrases common to these one-ups/clapback comments; selected only replies which DID NOT have these phrases. - Selected the top-scored ~1,667 replies from each month in 2014, avoiding /r/IAmA. - Made rows in PostgreSQL and output as CSV. 
## Considerations for Using the Data Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links! You can use the subreddit and score columns to filter, and subreddit and timestamps to improve predictions of reply content. Reddit comments are properties of Reddit and comment owners using their Terms of Service.
georeactor/reddit_one_ups_seq2seq_2014
[ "language:en", "reddit", "not-for-all-eyes", "not-for-all-audiences", "region:us" ]
2022-12-29T08:25:12+00:00
{"language": "en", "tags": ["reddit", "not-for-all-eyes", "not-for-all-audiences"]}
2023-03-28T21:01:50+00:00
0f769cdd5e8f1a6ab5e5cc2dfb2819c34bc319a7
COVID-19 Epidemic Weibo Emotional Dataset, the content of Weibo in this dataset is the epidemic Weibo obtained by using relevant keywords to filter during the epidemic, and its content is related to COVID-19. Each tweet is labeled as one of the following six categories: neutral (no emotion), happy (positive), angry (angry), sad (sad), fear (fear), surprise (surprise) The COVID-19 Weibo training dataset includes 8,606 Weibos, the validation set contains 2,000 Weibos, and the test dataset contains 3,000 Weibos. 疫情微博数据集,该数据集内的微博内容是在疫情期间使用相关关键字筛选获得的疫情微博,其内容与新冠疫情相关。 每条微博被标注为以下六个类别之一:neutral(无情绪)、happy(积极)、angry(愤怒)、sad(悲伤)、fear(恐惧)、surprise(惊奇) 疫情微博训练数据集包括8,606条微博,验证集包含2,000条微博,测试数据集包含3,000条微博。
souljoy/COVID-19_weibo_emotion
[ "region:us" ]
2022-12-29T09:05:37+00:00
{}
2022-12-29T09:42:16+00:00
ebfcf2c45f5a0ead919889674965e6eec8c6670a
# SICK_PL - Sentences Involving Compositional Knowledge (Polish) ### Dataset Summary This dataset is a manually translated version of popular English natural language inference (NLI) corpus consisting of 10,000 sentence pairs. NLI is the task of determining whether one statement (premise) semantically entails other statement (hypothesis). Such relation can be classified as entailment (if the first sentence entails second sentence), neutral (the first statement does not determine the truth value of the second statement), or contradiction (if the first sentence is true, the second is false). Additionally, the original SICK dataset contains semantic relatedness scores for the sentence pairs as real numbers ranging from 1 to 5. When translating the corpus to Polish, we tried to be as close as possible to the original meaning. In some cases, however, two different English sentences had an identical translation in Polish. Such instances were slightly modified in order to preserve both the meaning and the syntactic differences in sentence pair. 
### Data Instances Example instance: ``` { "pair_ID": "122", "sentence_A": "Pięcioro dzieci stoi blisko siebie , a jedno dziecko ma pistolet", "sentence_B": "Pięcioro dzieci stoi blisko siebie i żadne z nich nie ma pistoletu", "relatedness_score": 3.7, "entailment_judgment": "CONTRADICTION" } ``` ### Data Fields - pair_ID: sentence pair ID - sentence_A: sentence A - sentence_B: sentence B - entailment_judgment: textual entailment gold label: entailment (0), neutral (1) or contradiction (2) - relatedness_score: semantic relatedness gold score (on a 1-5 continuous scale) ### Citation Information ``` @inproceedings{dadas-etal-2020-evaluation, title = "Evaluation of Sentence Representations in {P}olish", author = "Dadas, Slawomir and Pere{\l}kiewicz, Micha{\l} and Po{\'s}wiata, Rafa{\l}", booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", month = may, year = "2020", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2020.lrec-1.207", pages = "1674--1680", language = "English", ISBN = "979-10-95546-34-4", } ```
sdadas/sick_pl
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:semantic-similarity-scoring", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:sick", "language:pl", "license:cc-by-nc-sa-3.0", "region:us" ]
2022-12-29T10:04:41+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-3.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["sick"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "semantic-similarity-scoring"], "pretty_name": "Sentences Involving Compositional Knowledge (Polish)", "dataset_info": {"features": [{"name": "pair_ID", "dtype": "string"}, {"name": "sentence_A", "dtype": "string"}, {"name": "sentence_B", "dtype": "string"}, {"name": "relatedness_score", "dtype": "float32"}, {"name": "entailment_judgment", "dtype": "string"}], "splits": [{"name": "train"}, {"name": "validation"}, {"name": "test"}]}}
2022-12-29T11:01:28+00:00
073012dc81efb47b0fd66b0ab48bf06ddb62f528
# PPC - Polish Paraphrase Corpus ### Dataset Summary Polish Paraphrase Corpus contains 7000 manually labeled sentence pairs. The dataset was divided into training, validation and test splits. The training part includes 5000 examples, while the other parts contain 1000 examples each. The main purpose of creating such a dataset was to verify how machine learning models perform in the challenging problem of paraphrase identification, where most records contain semantically overlapping parts. Technically, this is a three-class classification task, where each record can be assigned to one of the following categories: - Exact paraphrases - Sentence pairs that convey exactly the same information. We are interested only in the semantic meaning of the sentence, therefore this category also includes sentences that are semantically identical but, for example, have different emotional emphasis. - Close paraphrases - Sentence pairs with similar semantic meaning. In this category we include all pairs which contain the same information, but in addition to it there may be other semantically non-overlapping parts. This category also contains context-dependent paraphrases - sentence pairs that may have the same meaning in some contexts but are different in others. - Non-paraphrases - All other cases, including contradictory sentences and semantically unrelated sentences. The corpus contains 2911, 1297, and 2792 examples for the above three categories, respectively. The process of annotating the dataset was preceded by an automated generation of candidate pairs, which were then manually labeled. We experimented with two popular techniques of generating possible paraphrases: backtranslation with a set of neural machine translation models and paraphrase mining using a pre-trained multilingual sentence encoder. The extracted sentence pairs are drawn from different data sources: Taboeba, Polish news articles, Wikipedia and Polish version of SICK dataset. 
Since most of the sentence pairs obtained in this way fell into the first two categories, in order to balance the dataset, some of the examples were manually modified to convey different information. In this way, even negative examples often have high semantic overlap, making this problem difficult for machine learning models. ### Data Instances Example instance: ``` { "sentence_A": "Libia: lotnisko w w Trypolisie ostrzelane rakietami.", "sentence_B": "Jedyne lotnisko w stolicy Libii - Trypolisie zostało w nocy z wtorku na środę ostrzelane rakietami.", "label": "2" } ``` ### Data Fields - sentence_A: first sentence text - sentence_B: second sentence text - label: label identifier corresponding to one of three categories ### Citation Information ``` @inproceedings{9945218, author={Dadas, S{\l}awomir}, booktitle={2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC)}, title={Training Effective Neural Sentence Encoders from Automatically Mined Paraphrases}, year={2022}, volume={}, number={}, pages={371-378}, doi={10.1109/SMC53654.2022.9945218} } ```
sdadas/ppc
[ "task_categories:text-classification", "task_ids:semantic-similarity-classification", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:pl", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-12-29T10:11:25+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["semantic-similarity-classification"], "pretty_name": "Polish Paraphrase Corpus", "dataset_info": {"features": [{"name": "sentence_A", "dtype": "string"}, {"name": "sentence_B", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "not used", "1": "exact paraphrases", "2": "similar sentences", "3": "non-paraphrases"}}}}], "splits": [{"name": "train", "num_bytes": 539121, "num_examples": 5000}, {"name": "validation", "num_bytes": 107010, "num_examples": 1000}, {"name": "test", "num_bytes": 106515, "num_examples": 1000}]}}
2024-01-19T06:11:43+00:00
82ddf9c0d06ffc982aeccf2473b7ce31f2167adf
# 8TAGS ### Dataset Summary A Polish topic classification dataset consisting of headlines from social media posts. It contains about 50,000 sentences annotated with 8 topic labels: film, history, food, medicine, motorization, work, sport and technology. This dataset was created automatically by extracting sentences from headlines and short descriptions of articles posted on Polish social networking site **wykop.pl**. The service allows users to annotate articles with one or more tags (categories). Dataset represents a selection of article sentences from 8 popular categories. The resulting corpus contains cleaned and tokenized, unambiguous sentences (tagged with only one of the selected categories), and longer than 30 characters. ### Data Instances Example instance: ``` { "sentence": "Kierowca był nieco zdziwiony że podróżując sporo ponad 200 km / h zatrzymali go policjanci.", "label": "4" } ``` ### Data Fields - sentence: sentence text - label: label identifier corresponding to one of 8 topics ### Citation Information ``` @inproceedings{dadas-etal-2020-evaluation, title = "Evaluation of Sentence Representations in {P}olish", author = "Dadas, Slawomir and Pere{\l}kiewicz, Micha{\l} and Po{\'s}wiata, Rafa{\l}", booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", month = may, year = "2020", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2020.lrec-1.207", pages = "1674--1680", language = "English", ISBN = "979-10-95546-34-4", } ```
sdadas/8tags
[ "task_categories:text-classification", "task_ids:topic-classification", "task_ids:multi-class-classification", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:pl", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-12-29T10:19:38+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["topic-classification", "multi-class-classification"], "pretty_name": "8TAGS", "dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "film", "1": "history", "2": "food", "3": "medicine", "4": "motorization", "5": "work", "6": "sport", "7": "technology"}}}}], "splits": [{"name": "train", "num_bytes": 3765325, "num_examples": 40001}, {"name": "validation", "num_bytes": 467676, "num_examples": 5000}, {"name": "test", "num_bytes": 416311, "num_examples": 4372}]}}
2024-01-19T06:10:23+00:00
add7cab1932a3b0e6d9347285218dd0fe98ef1aa
# Dataset Card for "speech2text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
qbaro/speech2text
[ "region:us" ]
2022-12-29T10:54:15+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 1357744185, "num_examples": 1057}, {"name": "test", "num_bytes": 589556544, "num_examples": 464}], "download_size": 1949997840, "dataset_size": 1947300729}}
2022-12-30T08:47:33+00:00
e7d9481ad419f20c4b87c05859bc0be17c923e74
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Prgckwb/jiro-style-ramen
[ "region:us" ]
2022-12-29T10:58:35+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 978393.0, "num_examples": 31}], "download_size": 978665, "dataset_size": 978393.0}}
2022-12-29T13:36:40+00:00
9a3809d0d5e8b975b2ed489dc7be07a97a09de05
# Dataset Card for "results_valid_20rows_2022-12-29" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joddy/results_valid_20rows_2022-12-29
[ "region:us" ]
2022-12-29T12:49:17+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "resolution", "dtype": "int64"}, {"name": "attributes_loc", "dtype": {"class_label": {"names": {"0": "upper left", "1": "upper right", "2": "lower left", "3": "lower right"}}}}, {"name": "NL_text", "dtype": "string"}, {"name": "bbox_text", "dtype": "string"}, {"name": "center_text", "dtype": "string"}, {"name": "normed_object_bbox", "sequence": "int64"}, {"name": "without_pos_stable-diffusion-v1-5", "dtype": "image"}, {"name": "NL_stable-diffusion-v1-5", "dtype": "image"}, {"name": "bbox_stable-diffusion-v1-5", "dtype": "image"}, {"name": "center_stable-diffusion-v1-5", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_off", "dtype": "image"}, {"name": "NL_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_off", "dtype": "image"}, {"name": "bbox_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_off", "dtype": "image"}, {"name": "center_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_on", "dtype": "image"}, {"name": "NL_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_on", "dtype": "image"}, {"name": "bbox_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_on", "dtype": "image"}, {"name": "center_text_TextENC_on", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 160413036.0, "num_examples": 20}], "download_size": 160434518, "dataset_size": 160413036.0}}
2022-12-29T13:04:15+00:00
a896ce6881f13f95b061fd0759782288d50572d1
Rafeq/Donat_a_cry
[ "license:mit", "region:us" ]
2022-12-29T13:32:55+00:00
{"license": "mit"}
2022-12-29T13:38:28+00:00
c6beef35c66617554fb99eeb88bef3bf9141dfe6
# Dataset Card for Pochita Dataset ## Dataset created for fine-tuning Stable Diffusion model on HuggingFace Diffusion Hackaton Consists of 19 photos of Pochita plushie. (ye, he's cute)
Arch4ngel/pochita
[ "task_categories:summarization", "task_ids:news-articles-summarization", "annotations_creators:no-annotation", "language_creators:found", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc", "region:us" ]
2022-12-29T13:42:14+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["en"], "license": "cc", "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["summarization"], "task_ids": ["news-articles-summarization"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 73272140.0, "num_examples": 19}], "download_size": 73099117, "dataset_size": 73272140.0}}
2022-12-29T14:00:15+00:00
ec5e31de25f64ea06c378eaa6b982a28301e60f9
The original dataset is in French (https://www.kaggle.com/datasets/fedi1996/insurance-reviews-france) --- Dataset was translated to the Dutch language using the Google translate python library googletrans==3.1.0a0 --- The sentiment labels are 1 (POS) and -1 (NEG) ---
ebrigham/NL_insurance_reviews_sentiment
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:nl", "region:us" ]
2022-12-29T13:59:07+00:00
{"language": ["nl"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"]}
2023-10-23T08:18:16+00:00
13df88874e25c0b8a035c058e30d460c1be94a9e
Torkan/testing
[ "license:afl-3.0", "region:us" ]
2022-12-29T14:45:21+00:00
{"license": "afl-3.0"}
2022-12-29T14:51:30+00:00
3dbcb691c145b979dd157248197b4e28af3e8789
tbbl/testing
[ "region:us" ]
2022-12-29T15:08:21+00:00
{}
2023-01-06T14:06:06+00:00
7bccfdcabf8dc14fb27bab2a508818fec0128769
# Dataset Card for "bankingapp_sentiment" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/bankingapp_sentiment
[ "region:us" ]
2022-12-29T15:40:21+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "dtype": "null"}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "null"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 163514, "num_examples": 1000}], "download_size": 79893, "dataset_size": 163514}}
2022-12-29T15:40:35+00:00
788f9b660eb2ff16caf65f01f22df66cf6b85880
--- annotations_creators: - other language: [] language_creators: - other license: [] multilinguality: [] pretty_name: A dataset containing 17 images of a plush reindeer size_categories: [] source_datasets: [] tags: [] task_categories: - other task_ids: [] ---sssssssssssssssssssssssssssssssssssss
Likalto4/Rena_dataset
[ "region:us" ]
2022-12-29T15:49:54+00:00
{}
2022-12-29T16:04:31+00:00
412bd606998b3be099f39922211b270040ae2e30
![](https://huggingface.co/datasets/TrpFrog/trpfrog-icons/resolve/main/logo.jpg) # trpfrog-icons Dataset This is a dataset of [TrpFrog](https://trpfrog.net)'s icons. By the way, what do you use this for? 🤔 ## How to use ```py from datasets import load_dataset dataset = load_dataset("TrpFrog/trpfrog-icons") ``` ```py # print all data for data in dataset["train"]: print(data) # remove not green icons dataset = dataset.filter(lambda x: x["label"] == 0) ``` ## License MIT License
TrpFrog/trpfrog-icons
[ "license:mit", "region:us" ]
2022-12-29T17:00:46+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "green", "1": "others"}}}}], "splits": [{"name": "train", "num_bytes": 3106612.0, "num_examples": 50}], "download_size": 2598455, "dataset_size": 3106612.0}}
2022-12-30T04:37:09+00:00
4b749678d49457904a86a5936bf5c955b23bec68
senseiberia/768_regularization_images
[ "license:gpl", "region:us" ]
2022-12-29T17:40:53+00:00
{"license": "gpl"}
2022-12-29T18:22:48+00:00
81a394970aa0aab17c354b4ffbcbf5539dc00397
All images of all ratings from e621.net from the date it was generated, at sample resolution where possible. This includes the following additional metadata: - post ID - created at - updated at - tags (stored as IDs you can cross-reference from an e621 tags dump) - rating (0 = safe, 1 = questionable, 2 = explicit) - favorite count - comment count - up score - down score Note that this dataset excludes images that are, at the time of scraping: - pending - tagged with tags indicating that it is illegal to possess in most jurisdictions Some files in this dataset may be corrupted. Make sure you're able to handle invalid images in your processing code or you're going to have bad time!
thruway/e621_samples_2022-12-28
[ "region:us" ]
2022-12-29T18:29:17+00:00
{"extra_gated_heading": "Terms of use", "extra_gated_button_content": "Acknowledge", "extra_gated_fields": {"I will use this dataset in a way that does not hinder the ability of artists to make a living from their work": "checkbox", "I acknowledge that the content contained within this dataset is the intellectual property of the artists who created it": "checkbox", "If I should wish to use this dataset for any commercial purposes, it is my responsibility to obtain the appropriate permissions from the copyright holders": "checkbox"}, "dataset_info": {"features": [{"name": "id", "dtype": "uint32"}, {"name": "created_at", "dtype": "timestamp[us]"}, {"name": "updated_at", "dtype": "timestamp[us]"}, {"name": "image", "dtype": "image"}, {"name": "tags", "sequence": "uint32"}, {"name": "rating", "dtype": "uint8"}, {"name": "fav_count", "dtype": "uint32"}, {"name": "comment_count", "dtype": "uint32"}, {"name": "up_score", "dtype": "int32"}, {"name": "down_score", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 384353755927.75, "num_examples": 3065570}], "download_size": 382556768725, "dataset_size": 384353755927.75}, "viewer": false}
2022-12-30T22:02:16+00:00
aa052d3aff7a0398d227d7f5a3ac1699007bb1df
# Dataset Card for "dreambooth-hackathon-images-nendoroid" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
milyiyo/dreambooth-hackathon-images-nendoroid
[ "region:us" ]
2022-12-29T18:53:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 795179.0, "num_examples": 28}], "download_size": 795969, "dataset_size": 795179.0}}
2022-12-29T18:53:23+00:00
a19eace121442bce60da9f5036dc16bf9f2f6fa6
<div align="center"> <img width="640" alt="keremberke/construction-safety-object-detection" src="https://huggingface.co/datasets/keremberke/construction-safety-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['barricade', 'dumpster', 'excavators', 'gloves', 'hardhat', 'mask', 'no-hardhat', 'no-mask', 'no-safety vest', 'person', 'safety net', 'safety shoes', 'safety vest', 'dump truck', 'mini-van', 'truck', 'wheel loader'] ``` ### Number of Images ```json {'train': 307, 'valid': 57, 'test': 34} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/construction-safety-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/dataset/1](https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ construction-site-safety_dataset, title = { Construction Site Safety Dataset }, type = { Open Source Dataset }, author = { Roboflow Universe Projects }, howpublished = { \\url{ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety } }, url = { https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2023 }, month = { jan }, note = { visited on 2023-01-26 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on December 29, 2022 at 11:22 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your 
dataset over time It includes 398 images. Construction are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
keremberke/construction-safety-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Construction", "Logistics", "Utilities", "Damage Risk", "Ppe", "Manufacturing", "Assembly Line", "Warehouse", "Factory", "region:us" ]
2022-12-29T20:12:45+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Construction", "Logistics", "Utilities", "Damage Risk", "Ppe", "Construction", "Utilities", "Manufacturing", "Logistics", "Ppe", "Assembly Line", "Warehouse", "Factory"]}
2023-01-27T13:36:19+00:00
a7e4591f90b620c0a0e44ea6d2e44607f2eb0b99
breadlicker45/midi-hex-data
[ "license:other", "region:us" ]
2022-12-29T21:11:24+00:00
{"license": "other"}
2022-12-29T21:14:38+00:00
caef5b500f6ecbf5c6d9f73eb393bdf6966eca7d
# Dataset Card for "diffusion_db_10k_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bggmyfuture-ai/diffusion_db_10k_processed
[ "region:us" ]
2022-12-29T21:23:33+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "label_txt", "dtype": "string"}, {"name": "topic_keywords", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2572020, "num_examples": 8481}], "download_size": 570847, "dataset_size": 2572020}}
2022-12-31T02:31:28+00:00
43e04f2c5dbaadb2b98cf678e17b979bf198db2e
LINK TO THE ITEM :: https://www.wakefit.co/coffee-tables/sheesham-wood-coffee-table-jackson/WSCFTJACKSONR1
alpha-proj/wakefit_center_table
[ "region:us" ]
2022-12-29T22:00:46+00:00
{}
2022-12-29T22:12:15+00:00
c2697e3d12792ed790fd0b407ae8e37b5ea8e0c8
kokuma/figuritas-de-mazapan
[ "license:openrail", "region:us" ]
2022-12-29T22:17:04+00:00
{"license": "openrail"}
2022-12-29T23:38:56+00:00
4e9b2f7204e129f8fa45baa0c9bdb1e7216dce96
# Dataset Card for "shoebill-hackathon-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fnavales/shoebill-hackathon-images
[ "region:us" ]
2022-12-29T22:47:44+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 272566.0, "num_examples": 18}], "download_size": 264844, "dataset_size": 272566.0}}
2022-12-29T22:47:48+00:00
b126db0f2388da3aee9d5193397487a69c625a7e
DavidVivancos/MindBigData2022_MNIST_MU
[ "license:odbl", "region:us" ]
2022-12-29T22:49:09+00:00
{"license": "odbl"}
2022-12-29T22:50:39+00:00
6d1ae6070fe2530bdf4a2185ab8e9edb772e9d75
DavidVivancos/MindBigData2022_MNIST_IN
[ "license:odbl", "region:us" ]
2022-12-29T22:51:29+00:00
{"license": "odbl"}
2022-12-29T22:52:37+00:00
eadb3c19dcf97f579393883947fe0c650690365f
DavidVivancos/MindBigData2022_MNIST_EP
[ "license:odbl", "region:us" ]
2022-12-29T23:15:09+00:00
{"license": "odbl"}
2022-12-29T23:17:06+00:00
a1cb132fc175685acf56daf1cbc41f7f2b9362ae
# Dataset Card for "bookcorpus_compact_512" Num samples: 1,219,333 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_512
[ "region:us" ]
2022-12-29T23:46:15+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2755013516, "num_examples": 1219333}], "download_size": 1625636757, "dataset_size": 2755013516}}
2022-12-30T21:51:11+00:00
0eaa262f877d80ddb219819c34fbb8dca4ae4c54
# Dataset Card for "bookcorpus_compact_256" Num samples: 2,389,359 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_256
[ "region:us" ]
2022-12-30T01:43:43+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2758524897, "num_examples": 2389359}], "download_size": 1630356023, "dataset_size": 2758524897}}
2022-12-30T21:52:38+00:00
31a9eb9e37c68a3e3558f927415c265671b685c2
## Dataset Description - **Homepage: https://mirror.xyz/bitkevin.eth** - **Repository: https://colab.research.google.com/drive/1EnqpDiKOVYhR0c6f4CgmDg2zqcbYZJpB#scrollTo=c1ef3d21-6e0e-46c9-a459-8a2ab856a5ca** - **Point of Contact: Kevin Leffew – [email protected]** ### Dataset Summary: golf-course This dataset (bethecloud/golf-courses) includes 21 unique images of golf courses pulled from Unsplash. The dataset is a collection of photographs taken at various golf courses around the world. The images depict a variety of scenes, including fairways, greens, bunkers, water hazards, and clubhouse facilities. The images are high resolution and have been carefully selected to provide a diverse range of visual content for fine-tuning a machine learning model. The dataset is intended to be used in the context of the Hugging Face Dream Booth hackathon, a competition that challenges participants to build innovative applications using the Hugging Face transformers library. The submission is for the category of landscape. Overall, this dataset provides a rich source of visual data for machine learning models looking to understand and classify elements of golf courses. Its diverse range of images and high-quality resolution make it well-suited for use in fine-tuning models for tasks such as image classification, object detection, and image segmentation. By using the golf course images as part of their training data, participants can fine-tune their models to recognize and classify specific features and elements commonly found on golf courses. The ultimate goal after the hackathon is to pull this dataset from decentralized cloud storage (like Storj DCS), increasing its accessibility, performance, and resilience by distributing across an edge of over 17,000 uncorrelated participants. 
## Example Output ![golf-acropolis.jpg]https://link.storjshare.io/juid5vc27dbajh6zyzplf4fah5xq/golf-course-output%2Fgolf-acropolis.png # Usage The golf-courses dataset can be used by modifying the instance_prompt: a photo of golf course ### Languages The language data in golf-courses is in English (BCP-47 en) ## Dataset Structure The complete dataset is GBs and consists of 21 objects. ### Parallelized download using Decentralized Object Storage (Storj DCS) A direct download for the dataset is located at https://link.storjshare.io/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses. In the future, Storj DCS will be used to download large datasets (exceeding 1TB) in a highly parallel, highly performant, and highly economical manner (by utilizing a network of over 17,000 diverse and economically incentivized datacenter node endpoints. ### Curation Rationale This model was created as a sample by Kevin Leffew as part of the DreamBooth Hackathon. ### Source Data The source data for the dataset is simply pulled from Unsplash ### Licensing Information MIT License ## Thanks to John Whitaker and Lewis Tunstall Thanks to [John Whitaker](https://github.com/johnowhitaker) and [Lewis Tunstall](https://github.com/lewtun)for writing out and describing the initial hackathon parameters at https://huggingface.co/dreambooth-hackathon. 
## Example Training Data ![golf-course1.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/andrew-anderson-CtyC2JjLhVg-unsplash.jpg) ![golf-course2.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/dean-SuGEzQkeJno-unsplash.jpg) ![golf-course3.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/amauri-cruz-filho-kBNV9WpCs5k-unsplash.jpg) ![golf-course4.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/minho-yoon-_ZVEio7AkGc-unsplash.jpg) ![golf-course5.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/minho-yoon-_ZVEio7AkGc-unsplash.jpg) ![golf-course6.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jura-FegOaqn_4GQ-unsplash%20%281%29.jpg?wrap=1) ![golf-course7.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/sly-dizzle-cE6SpYTfqqg-unsplash.jpg?wrap=1) ![golf-course8.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jura-FegOaqn_4GQ-unsplash.jpg) ![golf-course9.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/sly-dizzle-cE6SpYTfqqg-unsplash.jpg?wrap=1) ![golf-course10.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/dean-ricciardi-08Ipbe8GpWw-unsplash.jpg) ![golf-course11.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jonas-from-berlin-UgwkaRUt2d0-unsplash.jpg) ![golf-course12.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/rob-tol-Ner8kdSXh0M-unsplash.jpg) ![golf-course13.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/richard-brutyo-HQXFhq8FNJ8-unsplash.jpg?wrap=1) ![golf-course14.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/edwin-compton-Z8XlmAj65iM-unsplash.jpg?wrap=1)
bethecloud/golf-courses
[ "task_categories:image-classification", "task_ids:multi-label-image-classification", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "language:en", "license:mit", "golf-courses", "region:us" ]
2022-12-30T01:44:22+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": [], "task_categories": ["image-classification"], "task_ids": ["multi-label-image-classification"], "pretty_name": "bethecloud/golf-courses", "tags": ["golf-courses"]}
2022-12-30T06:55:56+00:00
98f1a36334b12f38ccf7a4bf76e3f2369c9fbbeb
# Dataset Card for "bookcorpus_compact_1024" Num samples: 616,051 The number of tokens for each sequence is not exactly 1024, but all slightly shorter than 1024. The sequences were built by merging sentences to the maximal length shorter than 1024 tokens. Therefore, padding is necessary for batch processing. ```python import time from typing import List from datasets import load_dataset, Dataset from tqdm import tqdm from transformers import AutoTokenizer def batch_tokenize(texts: List[str], tokenizer, batch_size=1000): start = time.time() """Tokenize the texts in batch""" assert tokenizer.is_fast, "tokenizer must be fast tokenizer" tokenized_texts = [] for i in tqdm(range(0, len(texts), batch_size)): batch = texts[i:i + batch_size] batch_encoding = tokenizer(batch) tokenized_texts.extend(batch_encoding["input_ids"]) print(f"batch_tokenize time with bs={batch_size}: {time.time() - start}") return tokenized_texts class CompactText: def __init__(self, tokenizer="gpt2", split="test", block_size=512): self.block_size = block_size self.tokenizer = AutoTokenizer.from_pretrained(tokenizer) def compact_load(self, dataset_name: str, split: str): dataset = load_dataset(dataset_name)[split] batch_encoding = batch_tokenize(dataset["text"], self.tokenizer, batch_size=10000) compact_texts = [] texts = dataset["text"] total_num_tok = 0 tracker = [] i = 0 for j in tqdm(range(len(batch_encoding))): total_num_tok += len(batch_encoding[j]) if total_num_tok >= self.block_size: batch_sents = texts[i:j] big_sent = " ".join(batch_sents) compact_texts.append(big_sent) tracker.append((i, j)) i = j total_num_tok = 0 print(tracker) # self.examples = compact_texts compact_ds = Dataset.from_dict({"text": compact_texts}) return compact_ds if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("-b", "--block-size", type=int, default=512) args = parser.parse_args() compactifier = CompactText(block_size=args.block_size) dataset = 
compactifier.compact_load(dataset_name="saibo/bookcorpus_deduplicated", split="train") dataset.push_to_hub(f"saibo/bookcorpus_compact_{args.block_size}") ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024
[ "size_categories:100K<n<1M", "region:us" ]
2022-12-30T01:45:52+00:00
{"size_categories": ["100K<n<1M"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2753205189, "num_examples": 616051}], "download_size": 1603181006, "dataset_size": 2753205189}}
2023-01-10T11:48:52+00:00
e3665ecd171e82b1a4b3b4a921c2194150e1daa1
# Dataset Card for "bookcorpus_small_compact_512" Num samples: 3,109 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_512
[ "region:us" ]
2022-12-30T02:15:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19751847, "num_examples": 3109}], "download_size": 9777636, "dataset_size": 19751847}}
2023-01-18T22:18:48+00:00
3419587a608a9002eb4bd740b9cf78e20e4c4396
# Dataset Card for "bookcorpus_small_compact_256" Num samples: 6,104 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_256
[ "region:us" ]
2022-12-30T02:15:45+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "concept_with_offset", "dtype": "null"}], "splits": [{"name": "train"}], "download_size": 0, "dataset_size": 0}}
2023-03-08T08:34:19+00:00
d217b7632eadca4f9a3c67451c374dfbb8d71a4d
# Dataset Card for "bookcorpus_small_compact_1024" Num samples: 1,571 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_1024
[ "region:us" ]
2022-12-30T02:16:37+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18843209, "num_examples": 1571}], "download_size": 9378154, "dataset_size": 18843209}}
2023-01-19T10:09:58+00:00