Column length summary (min/max string or list lengths per column, from the dataset viewer):

| Column          | Type   | Min | Max   |
|:----------------|:-------|----:|------:|
| sha             | string | 40  | 40    |
| text            | string | 1   | 13.4M |
| id              | string | 2   | 117   |
| tags            | list   | 1   | 7.91k |
| created_at      | string | 25  | 25    |
| metadata        | string | 2   | 875k  |
| last_modified   | string | 25  | 25    |
| arxiv           | list   | 0   | 25    |
| languages       | list   | 0   | 7.91k |
| tags_str        | string | 17  | 159k  |
| text_str        | string | 1   | 447k  |
| text_lists      | list   | 0   | 352   |
| processed_texts | list   | 1   | 353   |
| tokens_length   | list   | 1   | 353   |
| input_texts     | list   | 1   | 40    |
ba4f8e48ba4afaca6c6831ee2341004a03cc2df7
Source: https://github.com/rudinger/winogender-schemas/tree/master ``` @InProceedings{rudinger-EtAl:2018:N18, author = {Rudinger, Rachel and Naradowsky, Jason and Leonard, Brian and {Van Durme}, Benjamin}, title = {Gender Bias in Coreference Resolution}, booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies}, month = {June}, year = {2018}, address = {New Orleans, Louisiana}, publisher = {Association for Computational Linguistics} } ```
oskarvanderwal/winogender
[ "language:en", "license:mit", "gender bias", "region:us" ]
2023-11-28T15:50:16+00:00
{"language": ["en"], "license": "mit", "pretty_name": "Winogender", "tags": ["gender bias"], "configs": [{"config_name": "all", "data_files": [{"split": "test", "path": "test.tsv"}]}, {"config_name": "gotcha", "data_files": [{"split": "test", "path": "gotcha.tsv"}]}]}
2023-12-20T10:26:26+00:00
[]
[ "en" ]
TAGS #language-English #license-mit #gender bias #region-us
Source: URL
[]
[ "TAGS\n#language-English #license-mit #gender bias #region-us \n" ]
[ 20 ]
[ "passage: TAGS\n#language-English #license-mit #gender bias #region-us \n" ]
a7e9786e3930b57ae95d38859106e4b50444f943
# Dataset Card for "google-argentinian-spanish" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ylacombe/google-argentinian-spanish
[ "region:us" ]
2023-11-28T15:57:18+00:00
{"dataset_info": [{"config_name": "female", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1928460472.968, "num_examples": 3921}], "download_size": 1625565296, "dataset_size": 1928460472.968}, {"config_name": "male", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 844151626.352, "num_examples": 1818}], "download_size": 707569029, "dataset_size": 844151626.352}], "configs": [{"config_name": "female", "data_files": [{"split": "train", "path": "female/train-*"}]}, {"config_name": "male", "data_files": [{"split": "train", "path": "male/train-*"}]}]}
2023-11-28T15:59:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "google-argentinian-spanish" More Information needed
[ "# Dataset Card for \"google-argentinian-spanish\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"google-argentinian-spanish\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"google-argentinian-spanish\"\n\nMore Information needed" ]
18c187b1bfa6f5ada649e4886d02744ea9acafa2
# Dataset Card for "google-colombian-spanish" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ylacombe/google-colombian-spanish
[ "region:us" ]
2023-11-28T15:58:13+00:00
{"dataset_info": [{"config_name": "female", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1280346176.08, "num_examples": 2369}], "download_size": 1038790546, "dataset_size": 1280346176.08}, {"config_name": "male", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1340876319.24, "num_examples": 2534}], "download_size": 1040171763, "dataset_size": 1340876319.24}], "configs": [{"config_name": "female", "data_files": [{"split": "train", "path": "female/train-*"}]}, {"config_name": "male", "data_files": [{"split": "train", "path": "male/train-*"}]}]}
2023-11-28T15:59:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "google-colombian-spanish" More Information needed
[ "# Dataset Card for \"google-colombian-spanish\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"google-colombian-spanish\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"google-colombian-spanish\"\n\nMore Information needed" ]
5271b0be5f3acfa9b066b6ecf2430e6bb08fec27
# Dataset Card for "google-gujarati" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ylacombe/google-gujarati
[ "region:us" ]
2023-11-28T15:59:19+00:00
{"dataset_info": [{"config_name": "female", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1481707913.184, "num_examples": 2219}], "download_size": 1171501409, "dataset_size": 1481707913.184}, {"config_name": "male", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1233511380.616, "num_examples": 2053}], "download_size": 1011014041, "dataset_size": 1233511380.616}], "configs": [{"config_name": "female", "data_files": [{"split": "train", "path": "female/train-*"}]}, {"config_name": "male", "data_files": [{"split": "train", "path": "male/train-*"}]}]}
2023-11-28T16:00:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "google-gujarati" More Information needed
[ "# Dataset Card for \"google-gujarati\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"google-gujarati\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"google-gujarati\"\n\nMore Information needed" ]
c7ab886515fbc6d1f0fa7cc53df3019e9f381762
# Dataset Card for "google-marathi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ylacombe/google-marathi
[ "region:us" ]
2023-11-28T16:00:41+00:00
{"dataset_info": {"config_name": "female", "features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1044821483.114, "num_examples": 1569}], "download_size": 866109308, "dataset_size": 1044821483.114}, "configs": [{"config_name": "female", "data_files": [{"split": "train", "path": "female/train-*"}]}]}
2023-11-28T16:01:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "google-marathi" More Information needed
[ "# Dataset Card for \"google-marathi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"google-marathi\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"google-marathi\"\n\nMore Information needed" ]
4943c6038da98ca71f0f61ae7fd5ea5bf7c3d060
## MVTec Capsule Category ### Dataset Labels ``` {0: "normal", 1: "abnormal"} ``` ### Number of Images ```json {'train': 219, 'test': 132} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("alexsu52/mvtec_capsule") example = ds['train'][0] ``` ### MVTEC Dataset Page [https://www.mvtec.com/company/research/datasets/mvtec-ad](https://www.mvtec.com/company/research/datasets/mvtec-ad) ### Citation Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: International Journal of Computer Vision 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4. Paul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982. ### License CC BY-NC-SA 4.0 ### Dataset Summary MVTec AD is a dataset for benchmarking anomaly detection methods with a focus on industrial inspection. It contains over 5000 high-resolution images divided into fifteen different object and texture categories. Each category comprises a set of defect-free training images and a test set of images with various kinds of defects as well as images without defects. Pixel-precise annotations of all anomalies are also provided. More information can be found in our paper "MVTec AD – A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection" and its extended version "The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection".
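The card's usage snippet stops at fetching a single example. A small follow-up sketch, assuming only the features declared in the dataset metadata below (`image`, `mask`, and a `label` with names `normal`/`abnormal`), tallies the test split by label:

```python
from collections import Counter

from datasets import load_dataset

# Assumes the features declared in the dataset metadata: "image", "mask",
# and a class label whose names are ["normal", "abnormal"].
ds = load_dataset("alexsu52/mvtec_capsule")
label_names = ds["test"].features["label"].names

# Tally how many of the 132 test images are normal vs. abnormal.
counts = Counter(label_names[example["label"]] for example in ds["test"])
print(counts)
```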
alexsu52/mvtec_capsule
[ "task_categories:image-classification", "task_categories:image-segmentation", "license:cc-by-nc-sa-4.0", "region:us" ]
2023-11-28T16:06:49+00:00
{"license": "cc-by-nc-sa-4.0", "task_categories": ["image-classification", "image-segmentation"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "mask", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "normal", "1": "abnormal"}}}}], "splits": [{"name": "train", "num_bytes": 252483624, "num_examples": 219}, {"name": "test", "num_bytes": 26466712, "num_examples": 132}], "download_size": 404252480, "dataset_size": 278950336}}
2023-12-01T10:52:17+00:00
[]
[]
TAGS #task_categories-image-classification #task_categories-image-segmentation #license-cc-by-nc-sa-4.0 #region-us
## MVTec Capsule Category ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### MVTEC Dataset Page URL Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: International Journal of Computer Vision 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4. Paul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982. ### License CC BY-NC-SA 4.0 ### Dataset Summary MVTec AD is a dataset for benchmarking anomaly detection methods with a focus on industrial inspection. It contains over 5000 high-resolution images divided into fifteen different object and texture categories. Each category comprises a set of defect-free training images and a test set of images with various kinds of defects as well as images without defects. Pixel-precise annotations of all anomalies are also provided. More information can be found in our paper "MVTec AD – A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection" and its extended version "The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection".
[ "## MVTec Capsule Category", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### MVTEC Dataset Page\nURL\n\nPaul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: International Journal of Computer Vision 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4.\n\nPaul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982.", "### License\nCC BY-NC-SA 4.0", "### Dataset Summary\nMVTec AD is a dataset for benchmarking anomaly detection methods with a focus on industrial inspection. It contains over 5000 high-resolution images divided into fifteen different object and texture categories. Each category comprises a set of defect-free training images and a test set of images with various kinds of defects as well as images without defects.\n\nPixel-precise annotations of all anomalies are also provided. More information can be in our paper \"MVTec AD – A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\" and its extended version \"The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\"." ]
[ "TAGS\n#task_categories-image-classification #task_categories-image-segmentation #license-cc-by-nc-sa-4.0 #region-us \n", "## MVTec Capsule Category", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### MVTEC Dataset Page\nURL\n\nPaul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: International Journal of Computer Vision 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4.\n\nPaul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982.", "### License\nCC BY-NC-SA 4.0", "### Dataset Summary\nMVTec AD is a dataset for benchmarking anomaly detection methods with a focus on industrial inspection. It contains over 5000 high-resolution images divided into fifteen different object and texture categories. Each category comprises a set of defect-free training images and a test set of images with various kinds of defects as well as images without defects.\n\nPixel-precise annotations of all anomalies are also provided. More information can be in our paper \"MVTec AD – A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\" and its extended version \"The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\"." ]
[ 42, 8, 5, 5, 18, 180, 10, 173 ]
[ "passage: TAGS\n#task_categories-image-classification #task_categories-image-segmentation #license-cc-by-nc-sa-4.0 #region-us \n## MVTec Capsule Category### Dataset Labels### Number of Images### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:### MVTEC Dataset Page\nURL\n\nPaul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: International Journal of Computer Vision 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4.\n\nPaul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982.### License\nCC BY-NC-SA 4.0### Dataset Summary\nMVTec AD is a dataset for benchmarking anomaly detection methods with a focus on industrial inspection. It contains over 5000 high-resolution images divided into fifteen different object and texture categories. Each category comprises a set of defect-free training images and a test set of images with various kinds of defects as well as images without defects.\n\nPixel-precise annotations of all anomalies are also provided. More information can be in our paper \"MVTec AD – A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\" and its extended version \"The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection\"." ]
12e0ad9679599912c7aecb5eac814b6891c2f3f9
Dataset built with the bert-cased tokenizer; sentences are cut off at 512 tokens (not sentence pairs), with all sentence pairs extracted. Original datasets: - https://huggingface.co/datasets/bookcorpus - https://huggingface.co/datasets/wikipedia Variant: 20220301.en
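Given the ~37 GB train split reported in the metadata below, streaming is a reasonable way to inspect the data without a full download; a minimal sketch (not part of the original card):

```python
from datasets import load_dataset

# Streaming avoids downloading the full ~37 GB train split up front.
ds = load_dataset("gmongaras/BERT_Base_Cased_512_Dataset",
                  split="train", streaming=True)

# Peek at the first example; the only feature is a "text" string.
first = next(iter(ds))
print(first["text"][:200])
```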
gmongaras/BERT_Base_Cased_512_Dataset
[ "region:us" ]
2023-11-28T16:22:43+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36961083473, "num_examples": 136338653}], "download_size": 13895887135, "dataset_size": 36961083473}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-28T19:38:13+00:00
[]
[]
TAGS #region-us
Dataset built with the bert-cased tokenizer; sentences are cut off at 512 tokens (not sentence pairs), with all sentence pairs extracted. Original datasets: - URL - URL Variant: URL
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
4ee347e5d8edd1efb3cd74ca2e7612c1983522c2
# Dataset Card for "iliad_odyssey_aligned_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pnadel/iliad_odyssey_aligned_prompts
[ "region:us" ]
2023-11-28T16:48:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "sentid", "dtype": "string"}, {"name": "cit", "dtype": "string"}, {"name": "Eng", "dtype": "string"}, {"name": "Gk", "dtype": "string"}, {"name": "Lems", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40459548.37679168, "num_examples": 36669}, {"name": "test", "num_bytes": 10115714.623208325, "num_examples": 9168}], "download_size": 24752837, "dataset_size": 50575263.0}}
2023-11-28T16:48:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "iliad_odyssey_aligned_prompts" More Information needed
[ "# Dataset Card for \"iliad_odyssey_aligned_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"iliad_odyssey_aligned_prompts\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"iliad_odyssey_aligned_prompts\"\n\nMore Information needed" ]
c9da69ef5f4afe8a19073a1c04b839a83987ac29
colab notebook: https://colab.research.google.com/drive/1LoT0hGbwRwmsVITQLNzTDuFjyWlcfyxL#scrollTo=VOmUDbrYlUYe
Hasan-Mesbaul-420/TEST_testing_audio01
[ "region:us" ]
2023-11-28T16:49:41+00:00
{"dataset_info": {"features": [{"name": "Audio File Path", "dtype": "string"}, {"name": "array", "sequence": "float64"}, {"name": "sampling_rate", "dtype": "int64"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 1122894490, "num_examples": 351}], "download_size": 295896888, "dataset_size": 1122894490}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-28T16:54:09+00:00
[]
[]
TAGS #region-us
colab notebook: URL
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
db871e75c268f062e3ca5913ac7debf7924a5f29
# Creating Cloud-Cloudless Paired Dataset To generate the Cloud-Cloudless Paired Dataset, we utilize an existing dataset that encompasses imagery from five municipalities in Colombia. This dataset is structured with each municipality containing 165 images, acquired through the satellite_extractor API and based on SentinelHub, spanning 12 channels. Within each municipality, we have meticulously identified the optimal cloudless image and stored the corresponding names in a dictionary called `cloudless_groundtruths`. The primary objective is to subtract this specific cloudless image from the set of 165 images, resulting in 164 images per municipality. Subsequently, each of these 164 images will be paired with the previously identified cloudless image. Consequently, this process creates `164 * 2 * NUM_MUNICIPALITIES` images, yielding 1640 images (820 image pairs) in total. To facilitate this dataset creation, we have introduced the class `Cloud2CloudlesDataset`. This class replicates each corresponding ground truth for the 164 images in each municipality, storing every paired set in a newly designated folder named `DATASET`. Originally, the images were formatted as `image_DD%%MM%%YY`. As part of the dataset creation process, we will rename these images to `image_DD%%MM%%YY_gt` for the ground truth image and `image_DD%%MM%%YY_cloud` for the image with clouds. Upon initialization, the class requires the path to the source dataset, which contains raw images for each municipality organized in N folders, and the final path where the new dataset will be stored. The implementation includes thorough testing to verify the number of images, ensuring that the final count aligns with the total number of images encountered in the source folder path. Additionally, one of the functions within this class ensures the existence of each folder in the specified destination path, guaranteeing a well-organized and comprehensive Cloud-Cloudless Paired Dataset. GitHub code is available [here](https://github.com/sebasmos/satellite.extractor/blob/main/notebooks/create_Cloud2CloudlesDataset.ipynb).
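The card describes the pairing procedure only in prose. The sketch below is an illustrative reconstruction, not the notebook's actual code: the `cloudless_groundtruths` entry and folder names are hypothetical, and giving both files of a pair the cloudy image's date stem is an assumption made here so that filenames stay unique.

```python
import os
import shutil

# Hypothetical mapping from municipality folder to its optimal cloudless image.
cloudless_groundtruths = {"municipality_1": "image_01%%01%%20.tiff"}

def build_pairs(source_root: str, dest_root: str) -> None:
    for municipality, gt_name in cloudless_groundtruths.items():
        src_dir = os.path.join(source_root, municipality)
        dst_dir = os.path.join(dest_root, "DATASET", municipality)
        os.makedirs(dst_dir, exist_ok=True)
        for name in sorted(os.listdir(src_dir)):
            if name == gt_name:
                continue  # skip the ground truth itself: 165 - 1 = 164 cloudy images
            stem, ext = os.path.splitext(name)
            # Pair each cloudy image with a copy of the ground truth, using the
            # *_cloud / *_gt naming convention described in the card.
            shutil.copy(os.path.join(src_dir, name),
                        os.path.join(dst_dir, stem + "_cloud" + ext))
            shutil.copy(os.path.join(src_dir, gt_name),
                        os.path.join(dst_dir, stem + "_gt" + ext))
```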
MITCriticalData/cloud2cloudless_dataset_5_municipalities
[ "license:mit", "region:us" ]
2023-11-28T18:42:07+00:00
{"license": "mit"}
2023-11-29T14:12:25+00:00
[]
[]
TAGS #license-mit #region-us
# Creating Cloud-Cloudless Paired Dataset To generate the Cloud-Cloudless Paired Dataset, we utilize an existing dataset that encompasses imagery from five municipalities in Colombia. This dataset is structured with each municipality containing 165 images, acquired through the satellite_extractor API and based on SentinelHub, spanning 12 channels. Within each municipality, we have meticulously identified the optimal cloudless image and stored the corresponding names in a dictionary called 'cloudless_groundtruths'. The primary objective is to subtract this specific cloudless image from the set of 165 images, resulting in 164 images per municipality. Subsequently, each of these 164 images will be paired with the previously identified cloudless image. Consequently, this process creates '164 * 2 * NUM_MUNICIPALITIES' images, yielding 1640 images (820 image pairs) in total. To facilitate this dataset creation, we have introduced the class 'Cloud2CloudlesDataset'. This class replicates each corresponding ground truth for the 164 images in each municipality, storing every paired set in a newly designated folder named 'DATASET'. Originally, the images were formatted as 'image_DD%%MM%%YY'. As part of the dataset creation process, we will rename these images to 'image_DD%%MM%%YY_gt' for the ground truth image and 'image_DD%%MM%%YY_cloud' for the image with clouds. Upon initialization, the class requires the path to the source dataset, which contains raw images for each municipality organized in N folders, and the final path where the new dataset will be stored. The implementation includes thorough testing to verify the number of images, ensuring that the final count aligns with the total number of images encountered in the source folder path. Additionally, one of the functions within this class ensures the existence of each folder in the specified destination path, guaranteeing a well-organized and comprehensive Cloud-Cloudless Paired Dataset. GitHub code is available here.
[ "# Creating Cloud-Cloudless Paired Dataset\n\n\n\nTo generate the Cloud-Cloudless Paired Dataset, we utilize an existing dataset that encompasses imagery from five municipalities in Colombia. This dataset is structured with each municipality containing 165 images, acquired through the satellite_extractor API and based on SentinelHub, spanning across 12 different channels. Within each municipality, we have meticulously identified the optimal cloudless image and stored the corresponding names in a dictionary called 'cloudless_groundtruths'.\n\nThe primary objective is to subtract this specific cloudless image from the set of 165 images, resulting in 164 images per municipality. Subsequently, each of these 164 images will be paired with the previously identified cloudless image. Consequently, this process creates a total of '164 * 2 * NUM_MUNICIPALITIES', yielding 1640 images or 820 image pairs in total.\n\nTo facilitate this dataset creation, we have introduced the class 'Cloud2CloudlesDataset'. This class replicates each corresponding ground truth for the 164 images in each municipality, storing every paired set in a newly designated folder named 'DATASET'.\n\nOriginally, the images were formatted as 'image_DD%%MM%%YY'. As part of the dataset creation process, we will rename these images to 'image_DD%%MM%%YY_gt' for the ground truth image and 'image_DD%%MM%%YY_cloud' for the image with clouds.\n\nUpon initialization, the class requires the path to the source dataset, which contains raw images for each municipality organized in N folders, and the final path where the new dataset will be stored.\n\nThe implementation includes thorough testing to verify the number of images, ensuring that the final count aligns with the total number of images encountered in the source folder path. Additionally, one of the functions within this class ensures the existence of each folder in the specified destination path, guaranteeing a well-organized and comprehensive Cloud-Cloudless Paired Dataset.\n\n\nGithub code here:" ]
[ "TAGS\n#license-mit #region-us \n", "# Creating Cloud-Cloudless Paired Dataset\n\n\n\nTo generate the Cloud-Cloudless Paired Dataset, we utilize an existing dataset that encompasses imagery from five municipalities in Colombia. This dataset is structured with each municipality containing 165 images, acquired through the satellite_extractor API and based on SentinelHub, spanning across 12 different channels. Within each municipality, we have meticulously identified the optimal cloudless image and stored the corresponding names in a dictionary called 'cloudless_groundtruths'.\n\nThe primary objective is to subtract this specific cloudless image from the set of 165 images, resulting in 164 images per municipality. Subsequently, each of these 164 images will be paired with the previously identified cloudless image. Consequently, this process creates a total of '164 * 2 * NUM_MUNICIPALITIES', yielding 1640 images or 820 image pairs in total.\n\nTo facilitate this dataset creation, we have introduced the class 'Cloud2CloudlesDataset'. This class replicates each corresponding ground truth for the 164 images in each municipality, storing every paired set in a newly designated folder named 'DATASET'.\n\nOriginally, the images were formatted as 'image_DD%%MM%%YY'. As part of the dataset creation process, we will rename these images to 'image_DD%%MM%%YY_gt' for the ground truth image and 'image_DD%%MM%%YY_cloud' for the image with clouds.\n\nUpon initialization, the class requires the path to the source dataset, which contains raw images for each municipality organized in N folders, and the final path where the new dataset will be stored.\n\nThe implementation includes thorough testing to verify the number of images, ensuring that the final count aligns with the total number of images encountered in the source folder path. Additionally, one of the functions within this class ensures the existence of each folder in the specified destination path, guaranteeing a well-organized and comprehensive Cloud-Cloudless Paired Dataset.\n\n\nGithub code here:" ]
[ 11, 490 ]
[ "passage: TAGS\n#license-mit #region-us \n# Creating Cloud-Cloudless Paired Dataset\n\n\n\nTo generate the Cloud-Cloudless Paired Dataset, we utilize an existing dataset that encompasses imagery from five municipalities in Colombia. This dataset is structured with each municipality containing 165 images, acquired through the satellite_extractor API and based on SentinelHub, spanning across 12 different channels. Within each municipality, we have meticulously identified the optimal cloudless image and stored the corresponding names in a dictionary called 'cloudless_groundtruths'.\n\nThe primary objective is to subtract this specific cloudless image from the set of 165 images, resulting in 164 images per municipality. Subsequently, each of these 164 images will be paired with the previously identified cloudless image. Consequently, this process creates a total of '164 * 2 * NUM_MUNICIPALITIES', yielding 1640 images or 820 image pairs in total.\n\nTo facilitate this dataset creation, we have introduced the class 'Cloud2CloudlesDataset'. This class replicates each corresponding ground truth for the 164 images in each municipality, storing every paired set in a newly designated folder named 'DATASET'.\n\nOriginally, the images were formatted as 'image_DD%%MM%%YY'. As part of the dataset creation process, we will rename these images to 'image_DD%%MM%%YY_gt' for the ground truth image and 'image_DD%%MM%%YY_cloud' for the image with clouds.\n\nUpon initialization, the class requires the path to the source dataset, which contains raw images for each municipality organized in N folders, and the final path where the new dataset will be stored.\n\nThe implementation includes thorough testing to verify the number of images, ensuring that the final count aligns with the total number of images encountered in the source folder path. Additionally, one of the functions within this class ensures the existence of each folder in the specified destination path, guaranteeing a well-organized and comprehensive Cloud-Cloudless Paired Dataset.\n\n\nGithub code here:" ]
d0577ba2b207de1af48de00500135088a3d41595
# Dataset Card for "perturbed-explanations-tiny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cyrilzhang/perturbed-explanations-tiny
[ "region:us" ]
2023-11-28T18:46:45+00:00
{"dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "response_worse", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10701.0, "num_examples": 18}, {"name": "test", "num_bytes": 1189.0, "num_examples": 2}], "download_size": 0, "dataset_size": 11890.0}}
2023-11-28T18:47:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "perturbed-explanations-tiny" More Information needed
[ "# Dataset Card for \"perturbed-explanations-tiny\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"perturbed-explanations-tiny\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"perturbed-explanations-tiny\"\n\nMore Information needed" ]
1683f557fcd4a61838d093e55afc8bf0b3d7e2a0
This is a very small sample from PKU-Alignment/PKU-SafeRLHF-10K that has been processed for QA.
MelioAI/safety-qa-sample
[ "license:cc-by-nc-4.0", "region:us" ]
2023-11-28T19:02:26+00:00
{"license": "cc-by-nc-4.0", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 219061.38757718852, "num_examples": 500}, {"name": "valid", "num_bytes": 43812.2775154377, "num_examples": 100}, {"name": "test", "num_bytes": 43812.2775154377, "num_examples": 100}], "download_size": 197329, "dataset_size": 306685.9426080639}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-30T14:44:34+00:00
[]
[]
TAGS #license-cc-by-nc-4.0 #region-us
This is a very small sample from PKU-Alignment/PKU-SafeRLHF-10K that has been processed for QA.
[]
[ "TAGS\n#license-cc-by-nc-4.0 #region-us \n" ]
[ 17 ]
[ "passage: TAGS\n#license-cc-by-nc-4.0 #region-us \n" ]
ea1ee8abe656c6606df03392c3a7e0092dd4a8b8
# Dataset Card for "contracts_v8" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
paul-w-qs/contracts_v8
[ "region:us" ]
2023-11-28T19:32:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "TABLE_STYLE", "dtype": "string"}, {"name": "JSON_LABEL", "dtype": "string"}, {"name": "DICT_LABEL", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 265764399.424, "num_examples": 3904}], "download_size": 253696091, "dataset_size": 265764399.424}}
2023-11-28T19:34:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "contracts_v8" More Information needed
[ "# Dataset Card for \"contracts_v8\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"contracts_v8\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"contracts_v8\"\n\nMore Information needed" ]
ccbc49eba186c2e042a99bac3013b97ba40fd0a8
Dataset built with the bert-cased tokenizer; sentences are cut off at 512 tokens (not sentence pairs), with all sentence pairs extracted. Original datasets: https://huggingface.co/datasets/bookcorpus https://huggingface.co/datasets/wikipedia Variant: 20220301.en Mapped from: https://huggingface.co/datasets/gmongaras/BERT_Base_Cased_512_Dataset
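The card does not show how the mapping was produced. A plausible sketch using `datasets.map` and a BERT tokenizer (the exact parameters are assumptions; only the resulting columns `input_ids`, `token_type_ids`, and `attention_mask` are confirmed by the metadata below):

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Assumption: the mapped columns come from tokenizing the source dataset's
# "text" column at a maximum length of 512 tokens.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
source = load_dataset("gmongaras/BERT_Base_Cased_512_Dataset", split="train")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=512)

mapped = source.map(tokenize, batched=True, remove_columns=["text"])
```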
gmongaras/BERT_Base_Cased_512_Dataset_Mapped
[ "region:us" ]
2023-11-28T19:43:32+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 52875464012.02522, "num_examples": 136226984}], "download_size": 17583618282, "dataset_size": 52875464012.02522}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T07:10:37+00:00
[]
[]
TAGS #region-us
Dataset built with the bert-cased tokenizer; sentences are cut off at 512 tokens (not sentence pairs), with all sentence pairs extracted. Original datasets: URL URL Variant: URL Mapped from: URL
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
4c0312ebe79c2946cefa60c32f5c35bbfc7cd56b
# Bangumi Image Base of Nodame Cantabile This is the image base of bangumi Nodame Cantabile. We detected 31 characters and 5634 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 281 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 181 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 1138 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 217 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 1479 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 373 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 553 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 126 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 116 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png)
| ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 158 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 56 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 35 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 36 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 130 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 73 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 54 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 28 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 52 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 71 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 39 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 44 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 76 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 46 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 60 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 19 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 16 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 31 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 18 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 17 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 7 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | N/A | | noise | 104 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/nodamecantabile
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-28T19:55:05+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-28T22:32:30+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Nodame Cantabile ====================================== This is the image base of bangumi Nodame Cantabile. We detected 31 characters and 5634 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
7efd8fa83b0f8b6c3e0c9fa0ef83cb535a4503e8
# Bangumi Image Base of Mahou Shoujo Pretty Sammy This is the image base of bangumi Mahou Shoujo Pretty Sammy. We detected 40 characters and 2878 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1023 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 72 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 17 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 19 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 15 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 56 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 18 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 58 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 168 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 
6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 39 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 107 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 22 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 20 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 12 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 22 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 12 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 46 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 60 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 22 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 19 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 7 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 
3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | N/A | | 21 | 15 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 34 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 5 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | N/A | N/A | N/A | | 24 | 35 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 12 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 98 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 169 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 34 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 18 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 60 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 27 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 10 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 
3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 8 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 17 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 76 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 271 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 26 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 5 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | N/A | N/A | N/A | | noise | 124 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/mahoushoujoprettysammy
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-28T19:56:15+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T03:23:04+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Mahou Shoujo Pretty Sammy =============================================== This is the image base of bangumi Mahou Shoujo Pretty Sammy; we detected 40 characters and 2878 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
d8c2e79aac8c569d3686110f9504512d26c876a8
# Dataset Card for "bootstrap_sms" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BubbleJoe/bootstrap_sms
[ "region:us" ]
2023-11-28T20:05:50+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 473170, "num_examples": 1325}], "download_size": 106042, "dataset_size": 473170}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-28T20:05:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bootstrap_sms" More Information needed
[ "# Dataset Card for \"bootstrap_sms\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bootstrap_sms\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bootstrap_sms\"\n\nMore Information needed" ]
fad97e0aa292e6c855d01b0b8e94ad20a30e113d
# Bangumi Image Base of Hinamatsuri

This is the image base of bangumi Hinamatsuri; we detected 23 characters and 1820 images in total. The full dataset is [here](all.zip).

**Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability).

Here is the characters' preview:

| # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 |
|:------|---------:|:---------|:----------|:----------|:----------|:----------|:----------|:----------|:----------|:----------|
| 0 | 107 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) |
| 1 | 93 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) |
| 2 | 11 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) |
| 3 | 342 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) |
| 4 | 216 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) |
| 5 | 40 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) |
| 6 | 27 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) |
| 7 | 90 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) |
| 8 | 39 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) |
| 9 | 24 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) |
| 10 | 28 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) |
| 11 | 64 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) |
| 12 | 30 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) |
| 13 | 284 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) |
| 14 | 51 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) |
| 15 | 14 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) |
| 16 | 217 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) |
| 17 | 28 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) |
| 18 | 25 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) |
| 19 | 9 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) |
| 20 | 30 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) |
| 21 | 8 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) |
| noise | 43 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/hinamatsuri
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-28T20:08:48+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-28T21:12:18+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Hinamatsuri ================================= This is the image base of bangumi Hinamatsuri; we detected 23 characters and 1820 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
e6caf949d7419debf3ef97bdf198c02d233f8296
# Dataset Card for "summarize_from_feedback_oai_preprocessing_gpt2_153" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_oai_preprocessing_gpt2_153
[ "region:us" ]
2023-11-28T20:17:54+00:00
{"dataset_info": {"features": [{"name": "info", "struct": [{"name": "id", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "site", "dtype": "string"}, {"name": "article", "dtype": "string"}]}, {"name": "summaries", "list": [{"name": "text", "dtype": "string"}, {"name": "policy", "dtype": "string"}, {"name": "note", "dtype": "string"}]}, {"name": "choice", "dtype": "int32"}, {"name": "worker", "dtype": "string"}, {"name": "batch", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "extra", "struct": [{"name": "confidence", "dtype": "int32"}]}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "response0", "dtype": "string"}, {"name": "response0_token", "sequence": "int64"}, {"name": "response0_token_len", "dtype": "int64"}, {"name": "response1", "dtype": "string"}, {"name": "response1_token", "sequence": "int64"}, {"name": "response1_token_len", "dtype": "int64"}, {"name": "response0_policy", "dtype": "string"}, {"name": "response1_policy", "dtype": "string"}, {"name": "policies", "dtype": "string"}, {"name": "query_response0", "dtype": "string"}, {"name": "query_response0_token", "sequence": "int64"}, {"name": "query_response0_token_len", "dtype": "int64"}, {"name": "query_response1", "dtype": "string"}, {"name": "query_response1_token", "sequence": "int64"}, {"name": "query_response1_token_len", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2227391552, "num_examples": 92858}, {"name": "validation", "num_bytes": 2069857572, "num_examples": 86086}], "download_size": 0, "dataset_size": 4297249124}}
2023-12-24T22:52:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_oai_preprocessing_gpt2_153" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_gpt2_153\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_gpt2_153\"\n\nMore Information needed" ]
[ 6, 31 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_oai_preprocessing_gpt2_153\"\n\nMore Information needed" ]
e54c05f5f5eb5c852d2692de02cd36dfccf036ba
# TL;DR SFT Dataset for OpenAI's [Summarize from Feedback](https://openai.com/blog/summarization/) task

The dataset is directly taken from https://github.com/openai/summarize-from-feedback/tree/700967448d10004279f138666442bf1497d0e705#reddit-tldr-dataset

These columns are taken directly from the aforementioned dataset:

* **id**: unique identifier for the post
* **subreddit**: subreddit the post was taken from
* **title**: title of the post
* **post**: body of the post
* **summary**: summary of the post
* **reference_response**: reference response for the post

These columns are added by this preprocessing script:

* **query**: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has only 512 tokens; if the main text is too long, then it tries to truncate at the last `\n`. If it's too short it pads the main text ([summarize_from_feedback/tasks.py#L98-L165](https://github.com/openai/summarize-from-feedback/blob/700967448d10004279f138666442bf1497d0e705/summarize_from_feedback/tasks.py#L98-L165)). Padding is either space or `[PAD]` token (see Args below).
* **query_token**: tokenized version of `query`
* **reference_response_token**: tokenized version of `reference_response`
* **reference_response_token_len**: length of `reference_response_token`
* **query_reference_response**: concatenation of `query.strip()` and `reference_response`
* **query_reference_response_token**: tokenized version of `query_reference_response`, up to `max_sft_query_response_length` tokens
* **query_reference_response_token_len**: length of `query_reference_response_token`

# Args

```python
{'base_model': 'EleutherAI/pythia-160m',
 'hf_entity': 'vwxyzjn',
 'max_rm_query_response_length': 638,
 'max_rm_response_length': 169,
 'max_sft_query_response_length': 562,
 'max_sft_response_length': 53}
{'format_str': 'SUBREDDIT: r/{subreddit}\n'
               '\n'
               'TITLE: {title}\n'
               '\n'
               'POST: {post}\n'
               '\n'
               'TL;DR:',
 'length': 512,
 'pad_side': 'left',
 'padding': [209],
 'truncate_field': 'post',
 'truncate_text': '\n'}
```
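To make the truncation and padding rules concrete, here is a rough sketch of the `query` construction described in the bullets above: format the post, repeatedly drop text after the last newline in the `post` field until the query fits in 512 tokens, then left-pad with token id 209. This is an illustrative reimplementation under the Args shown, not the reference code in `summarize_from_feedback/tasks.py`, which handles edge cases somewhat differently.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-160m")

FORMAT_STR = "SUBREDDIT: r/{subreddit}\n\nTITLE: {title}\n\nPOST: {post}\n\nTL;DR:"
LENGTH = 512        # target query length in tokens ('length': 512)
PAD_TOKEN_ID = 209  # padding token id, per the Args above

def build_query_tokens(example):
    post = example["post"]
    while True:
        query = FORMAT_STR.format(
            subreddit=example["subreddit"], title=example["title"], post=post
        )
        tokens = tokenizer.encode(query)
        if len(tokens) <= LENGTH or "\n" not in post:
            break
        # Too long: truncate the post at its last newline and re-measure.
        post = post[: post.rfind("\n")]
    tokens = tokens[:LENGTH]  # hard fallback if no newline was left to cut at
    # Too short: left-pad to exactly LENGTH tokens ('pad_side': 'left').
    return [PAD_TOKEN_ID] * (LENGTH - len(tokens)) + tokens
```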
vwxyzjn/summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia-160m_53
[ "region:us" ]
2023-11-28T20:28:58+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "reference_response", "dtype": "string"}, {"name": "reference_response_token", "sequence": "int64"}, {"name": "reference_response_token_len", "dtype": "int64"}, {"name": "query_reference_response", "dtype": "string"}, {"name": "query_reference_response_token", "sequence": "int64"}, {"name": "query_reference_response_token_len", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1600440249, "num_examples": 116722}, {"name": "validation", "num_bytes": 88425771, "num_examples": 6447}, {"name": "test", "num_bytes": 89922466, "num_examples": 6553}], "download_size": 551824801, "dataset_size": 1778788486}}
2023-12-24T23:01:57+00:00
[]
[]
TAGS #region-us
# TL;DR SFT Dataset for OpenAI's Summarize from Feedback task The dataset is directly taken from URL These columns are taken directly from the aforementioned dataset: * id: unique identifier for the post * subreddit: subreddit the post was taken from * title: title of the post * post: body of the post * summary: summary of the post * reference_response: reference response for the post These columns are added by this preprocessing script: * query: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has only 512 tokens; if the main text is too long, then it tries to truncate at the last '\n'. If it's too short it pads the main text (summarize_from_feedback/URL#L98-L165). Padding is either space or '[PAD]' token (see Args below). * query_token: tokenized version of 'query' * reference_response_token: tokenized version of 'reference_response' * reference_response_token_len: length of 'reference_response_token' * query_reference_response: concatenation of 'URL()' and 'reference_response' * query_reference_response_token: tokenized version of 'query_reference_response', up to 'max_sft_query_response_length' tokens * query_reference_response_token_len: length of 'query_reference_response_token' # Args
[ "# TL;DR SFT Dataset for OpenAI's Summarize from Feedback task\n\nThe dataset is directly taken from URL\n\nThese columns are taken directly from the aforementioned dataset:\n\n* id: unique identifier for the post\n* subreddit: subreddit the post was taken from\n* title: title of the post\n* post: body of the post\n* summary: summary of the post\n* reference_response: reference response for the post\n\nThese columns are added by this preprocessing script:\n* query: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has only 512 tokens; if the main text is too long, then it tries to truncate at the last '\n'. If it's too short it pads the main text (summarize_from_feedback/URL#L98-L165). Padding is either space or '[PAD]' token (see Args below).\n* query_token: tokenized version of 'query'\n* reference_response_token: tokenized version of 'reference_response'\n* reference_response_token_len: length of 'reference_response_token'\n* query_reference_response: concatenation of 'URL()' and 'reference_response'\n* query_reference_response_token: tokenized version of 'query_reference_response', up to 'max_sft_query_response_length' tokens\n* query_reference_response_token_len: length of 'query_reference_response_token'", "# Args" ]
[ "TAGS\n#region-us \n", "# TL;DR SFT Dataset for OpenAI's Summarize from Feedback task\n\nThe dataset is directly taken from URL\n\nThese columns are taken directly from the aforementioned dataset:\n\n* id: unique identifier for the post\n* subreddit: subreddit the post was taken from\n* title: title of the post\n* post: body of the post\n* summary: summary of the post\n* reference_response: reference response for the post\n\nThese columns are added by this preprocessing script:\n* query: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has only 512 tokens; if the main text is too long, then it tries to truncate at the last '\n'. If it's too short it pads the main text (summarize_from_feedback/URL#L98-L165). Padding is either space or '[PAD]' token (see Args below).\n* query_token: tokenized version of 'query'\n* reference_response_token: tokenized version of 'reference_response'\n* reference_response_token_len: length of 'reference_response_token'\n* query_reference_response: concatenation of 'URL()' and 'reference_response'\n* query_reference_response_token: tokenized version of 'query_reference_response', up to 'max_sft_query_response_length' tokens\n* query_reference_response_token_len: length of 'query_reference_response_token'", "# Args" ]
[ 6, 384, 3 ]
[ "passage: TAGS\n#region-us \n# TL;DR SFT Dataset for OpenAI's Summarize from Feedback task\n\nThe dataset is directly taken from URL\n\nThese columns are taken directly from the aforementioned dataset:\n\n* id: unique identifier for the post\n* subreddit: subreddit the post was taken from\n* title: title of the post\n* post: body of the post\n* summary: summary of the post\n* reference_response: reference response for the post\n\nThese columns are added by this preprocessing script:\n* query: length-limited query for summarization: OAI pre-processes the main text (title + subreddit + post), ensuring it has only 512 tokens; if the main text is too long, then it tries to truncate at the last '\n'. If it's too short it pads the main text (summarize_from_feedback/URL#L98-L165). Padding is either space or '[PAD]' token (see Args below).\n* query_token: tokenized version of 'query'\n* reference_response_token: tokenized version of 'reference_response'\n* reference_response_token_len: length of 'reference_response_token'\n* query_reference_response: concatenation of 'URL()' and 'reference_response'\n* query_reference_response_token: tokenized version of 'query_reference_response', up to 'max_sft_query_response_length' tokens\n* query_reference_response_token_len: length of 'query_reference_response_token'# Args" ]
da6f8f9fda14201d119047b9c6c6cd647037e33f
# Dataset Card for "summarize_from_feedback_oai_preprocessing_pythia-160m_169" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_oai_preprocessing_pythia-160m_169
[ "region:us" ]
2023-11-28T20:29:54+00:00
{"dataset_info": {"features": [{"name": "info", "struct": [{"name": "id", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "site", "dtype": "string"}, {"name": "article", "dtype": "string"}]}, {"name": "summaries", "list": [{"name": "text", "dtype": "string"}, {"name": "policy", "dtype": "string"}, {"name": "note", "dtype": "string"}]}, {"name": "choice", "dtype": "int32"}, {"name": "worker", "dtype": "string"}, {"name": "batch", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "extra", "struct": [{"name": "confidence", "dtype": "int32"}]}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "response0", "dtype": "string"}, {"name": "response0_token", "sequence": "int64"}, {"name": "response0_token_len", "dtype": "int64"}, {"name": "response1", "dtype": "string"}, {"name": "response1_token", "sequence": "int64"}, {"name": "response1_token_len", "dtype": "int64"}, {"name": "response0_policy", "dtype": "string"}, {"name": "response1_policy", "dtype": "string"}, {"name": "policies", "dtype": "string"}, {"name": "query_response0", "dtype": "string"}, {"name": "query_response0_token", "sequence": "int64"}, {"name": "query_response0_token_len", "dtype": "int64"}, {"name": "query_response1", "dtype": "string"}, {"name": "query_response1_token", "sequence": "int64"}, {"name": "query_response1_token_len", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2210564467, "num_examples": 92858}, {"name": "validation", "num_bytes": 2054238499, "num_examples": 86086}], "download_size": 271347530, "dataset_size": 4264802966}}
2023-12-24T23:04:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_oai_preprocessing_pythia-160m_169" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia-160m_169\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia-160m_169\"\n\nMore Information needed" ]
[ 6, 33 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia-160m_169\"\n\nMore Information needed" ]
14a4b752c3f885bfd46bb7e50d334c3912826c9e
# Dataset Card for "es-2811-no-demoji-m" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gg-ai/es-2811-no-demoji-m
[ "region:us" ]
2023-11-28T20:34:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "val", "path": "data/val-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "clean_text", "dtype": "string"}, {"name": "sent", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5835720, "num_examples": 16694}, {"name": "test", "num_bytes": 1241079, "num_examples": 3547}, {"name": "val", "num_bytes": 213642, "num_examples": 626}], "download_size": 4538232, "dataset_size": 7290441}}
2023-11-28T20:34:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "es-2811-no-demoji-m" More Information needed
[ "# Dataset Card for \"es-2811-no-demoji-m\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"es-2811-no-demoji-m\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"es-2811-no-demoji-m\"\n\nMore Information needed" ]
902d1f41cc22600653dcdb0835123a9f710c0a64
data for 182
aldenn13l/182-fine-tune
[ "region:us" ]
2023-11-28T21:28:08+00:00
{"dataset_info": {"features": [{"name": "original_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "new_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1432179908.95, "num_examples": 1291}], "download_size": 1428584756, "dataset_size": 1432179908.95}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-06T09:27:26+00:00
[]
[]
TAGS #region-us
data for 182
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
5e2f486995e97034ef68a711d5c8fa8a1269ae5c
# Dataset Card for "french_poetry" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
manu/french_poetry
[ "region:us" ]
2023-11-28T22:18:48+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "poet", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "link", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3286614, "num_examples": 1821}], "download_size": 2072389, "dataset_size": 3286614}}
2023-11-28T22:24:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "french_poetry" More Information needed
[ "# Dataset Card for \"french_poetry\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"french_poetry\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"french_poetry\"\n\nMore Information needed" ]
1802898ca3935339dd37117394f47682b535ed62
# Dataset Card for "pretrain_sts_extend" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xwjzds/pretrain_sts_extend
[ "region:us" ]
2023-11-28T22:44:25+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1664682, "num_examples": 5657}], "download_size": 1024627, "dataset_size": 1664682}}
2023-11-28T22:44:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pretrain_sts_extend" More Information needed
[ "# Dataset Card for \"pretrain_sts_extend\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pretrain_sts_extend\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"pretrain_sts_extend\"\n\nMore Information needed" ]
e0430c60721c9f61cf90055a8cb77d5eca7b0b95
# ChatML DPO Pairs

This is a preprocessed version of [Intel/orca_dpo_pairs](https://huggingface.co/datasets/Intel/orca_dpo_pairs) using the [ChatML](https://huggingface.co/docs/transformers/chat_templating) format.

Like the original dataset, it contains 12k examples from [Orca](https://arxiv.org/abs/2306.02707) style dataset [Open-Orca/OpenOrca](https://huggingface.co/datasets/Open-Orca/OpenOrca).

Here is the code used to preprocess it:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

def chatml_format(example):
    # Format system
    if len(example['system']) > 0:
        message = {"role": "system", "content": example['system']}
        system = tokenizer.apply_chat_template([message], tokenize=False)
    else:
        system = ""

    # Format instruction
    message = {"role": "user", "content": example['question']}
    prompt = tokenizer.apply_chat_template([message], tokenize=False, add_generation_prompt=True)

    # Format chosen answer
    chosen = example['chatgpt'] + "<|im_end|>\n"

    # Format rejected answer
    rejected = example['llama2-13b-chat'] + "<|im_end|>\n"

    return {
        "prompt": system + prompt,
        "chosen": chosen,
        "rejected": rejected,
    }

# Load dataset
dataset = load_dataset("Intel/orca_dpo_pairs")['train']

# Save columns
original_columns = dataset.column_names

# Tokenizer
tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"

# Format dataset
dataset = dataset.map(
    chatml_format,
    remove_columns=original_columns
)
```
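The resulting `prompt`/`chosen`/`rejected` columns can be fed directly to DPO trainers that expect preference triples. As a quick sanity check of the formatting (a usage sketch, not part of the original preprocessing):

```python
from datasets import load_dataset

dataset = load_dataset("mlabonne/chatml_dpo_pairs", split="train")
sample = dataset[0]
print(sample["prompt"])   # system + user turns in ChatML, ending with the assistant header
print(sample["chosen"])   # preferred answer, terminated by "<|im_end|>"
```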
mlabonne/chatml_dpo_pairs
[ "task_categories:text-generation", "task_categories:question-answering", "task_categories:summarization", "task_categories:conversational", "size_categories:10K<n<100K", "license:apache-2.0", "arxiv:2306.02707", "region:us" ]
2023-11-28T23:22:38+00:00
{"license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation", "question-answering", "summarization", "conversational"], "pretty_name": "ChatML DPO Pairs", "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35914686, "num_examples": 12859}], "download_size": 19539811, "dataset_size": 35914686}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-28T23:27:55+00:00
[ "2306.02707" ]
[]
TAGS #task_categories-text-generation #task_categories-question-answering #task_categories-summarization #task_categories-conversational #size_categories-10K<n<100K #license-apache-2.0 #arxiv-2306.02707 #region-us
# ChatML DPO Pairs This is a preprocessed version of Intel/orca_dpo_pairs using the ChatML format. Like the original dataset, it contains 12k examples from Orca style dataset Open-Orca/OpenOrca. Here is the code used to preprocess it:
[ "# ChatML DPO Pairs\n\nThis is a preprocessed version of Intel/orca_dpo_pairs using the ChatML format.\n\nLike the original dataset, it contains 12k examples from Orca style dataset Open-Orca/OpenOrca.\n\nHere is the code used to preprocess it:" ]
[ "TAGS\n#task_categories-text-generation #task_categories-question-answering #task_categories-summarization #task_categories-conversational #size_categories-10K<n<100K #license-apache-2.0 #arxiv-2306.02707 #region-us \n", "# ChatML DPO Pairs\n\nThis is a preprocessed version of Intel/orca_dpo_pairs using the ChatML format.\n\nLike the original dataset, it contains 12k examples from Orca style dataset Open-Orca/OpenOrca.\n\nHere is the code used to preprocess it:" ]
[ 77, 69 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-question-answering #task_categories-summarization #task_categories-conversational #size_categories-10K<n<100K #license-apache-2.0 #arxiv-2306.02707 #region-us \n# ChatML DPO Pairs\n\nThis is a preprocessed version of Intel/orca_dpo_pairs using the ChatML format.\n\nLike the original dataset, it contains 12k examples from Orca style dataset Open-Orca/OpenOrca.\n\nHere is the code used to preprocess it:" ]
8185e74b8e6f29af244c46be9c4af9d83a3b2576
# Dataset Card for Dataset Name

Language models (LMs) commonly report perplexity on monolithic data held out from the training distribution. Implicitly or explicitly, this data is composed of domains—variations in the distribution of language. Rather than assuming perplexity on one distribution extrapolates to others, Perplexity Analysis for Language Model Assessment (Paloma) measures LM fit to 585 text domains, ranging from NY Times to r/depression on Reddit.

## Dataset Details

### Benchmark Inference and Submissions

We invite submissions to our benchmark and organize results by comparability based on compliance with guidelines such as the removal of benchmark contamination from pretraining. Standardized inference code for running comparable evaluations and details about making submissions to the Paloma benchmark can be found at the following link.

[How to evaluate and how to submit](https://github.com/allenai/ai2-olmo-eval/blob/main/paloma/README.md)

### Dataset Description

Paloma is for examining relative differences in LM fit on domains. We take these relative differences as a proxy of model fit to the shared knowledge, values, and social context that position the humans producing language in a domain. While we expect contemporary LMs to have a limited fit to the most complex of these latent factors of domains, improving fit to all factors is necessary both to improve perplexity and for any actual use of the LM. For example, better perplexity on a particular dialect of English suggests that that model will make a better chatbot for people that speak that dialect.

The sources of evaluation data in Paloma were selected based on the following desiderata: 1) including known resources, 2) including fine-grained domains, 3) including domains representing specific communities of interest. Different lines of research will require different selections of domains; Paloma aims to enable research on differences in LM fit over the hundreds of domains that are readily available in existing metadata.

Note that we are not able to re-host 2 of the 18 sources in Paloma comprising 39 domains. These are The Pile and ICE. The ICE corpus is available on request to the original authors following the instructions [here](https://www.ice-corpora.uzh.ch/en/access.html).

**Curated by:** Ian Magnusson, Akshita Bhagia, Valentin Hofmann, Luca Soldaini, Ananya Harsh Jha, Oyvind Tafjord, Dustin Schwenk, Evan Pete Walsh, Yanai Elazar, Kyle Lo, Dirk Groeneveld, Iz Beltagy, Hannaneh Hajishirzi, Noah A. Smith, Kyle Richardson, and Jesse Dodge

**Languages:** We elect to focus just on the language modeling of English and code data.

**License:** The data subsets are licensed under the AI2 ImpACT License - Low Risk Artifacts, except as listed below.

- Wikitext-103 - CC BY-SA
- TwitterAAE - for research purposes only
- Red Pajama - see license details
- M2D2 - CC BY-NC

**Paper:** https://arxiv.org/abs/2312.10523

### Dataset Sources

- [Code](https://github.com/allenai/ai2-olmo-eval/blob/main/paloma/README.md)
- Paloma 1B Baseline Models: [Dolma](https://huggingface.co/allenai/paloma-1b-baseline-dolma), [Pile](https://huggingface.co/allenai/paloma-1b-baseline-pile), [RedPajama](https://huggingface.co/allenai/paloma-1b-baseline-redpajama), [C4](https://huggingface.co/allenai/paloma-1b-baseline-c4), [mC4-en](https://huggingface.co/allenai/paloma-1b-baseline-mc4), [Falcon-RefinedWeb](https://huggingface.co/allenai/paloma-1b-baseline-falcon-refinedweb)

## Uses

This benchmark is intended for use in evaluating language model fit to fine-grained domains.

### Direct Use

This dataset should be used for evaluating the likelihood of text from a given domain by a language model.

### Out-of-Scope Use

Note that the sources contained in this benchmark include varying licenses with differing restrictions (see [License](#dataset-description)).

## Dataset Structure

The sources in this dataset are each organized into their own subcorpus, which consists of a `val` and `test` split. Data within each subcorpus is organized as files of newline-separated JSON, where each line represents a document and its associated metadata. The type of metadata available varies from source to source, but each line contains at least a field `'text'` which contains the text of the document.

## Dataset Creation

### Curation Rationale

Perplexity is conventionally reported on held out data from a model's training distribution or a small number of traditional test sets. Such monolithic evaluation ignores potential variation of model fit across different domains that LMs implicitly learn to model. We curate sources of fine-grained textual domains in Paloma to enable evaluation of language model fit to specific domains of text. Paloma is inspired by and incorporates previous work that curates corpora with marked domains (The Pile, M2D2, C4 100 Domains, ICE, TwitterAAE). We conduct a stratified subsample over domains where we set a minimum subsample size based on empirical estimation of the variance over subsamples.

### Source Data

#### Standard language modeling benchmarks

Though it is common practice to evaluate on held out data from the pretraining corpus of a given model, we evaluate *across* several major pretraining corpora and standard language modeling benchmarks. We also break down performance per domain within the datasets that have multiple domains. Note that although the Paloma benchmark analysis in our paper describes results on the Pile, we are not able to re-host this data.

| Source | Citation | Description |
|:---|:---|:---|
| c4-en | Raffel et al (2019) via Dodge et al (2021) | Standard contemporary LM pretraining corpus automatically filtered from the April 2019 Common Crawl scrape |
| mc4-en | Xue et al (2021) | The English language portion of a pretraining corpus automatically filtered from 71 Common Crawl scrapes |
| Pile | Gao et al (2020) | Standard contemporary LM benchmark from curated multi-source data including large scale non-webscraped sources |
| Wikitext-103 | Merity et al (2016) | A standard collection of verified “Good” and “Featured” articles on Wikipedia |
| Penn Tree Bank | Marcus et al (1999) via Nunes, Davide. (2020) | Classic Wall Street Journal benchmark with linguistic structure annotations omitted |
| RedPajama | Together Computer (2023) | A publicly available reproduction of the LLaMA (Touvron et al., 2023) pretraining source mixture, combining large amounts of webscraped text with smaller curated sources |
| Falcon-RefinedWeb | Penedo et al. (2023) | A corpus of English sampled from all Common Crawl scrapes until June 2023, more aggressively filtered and deduplicated than c4 and mc4-en |
| Dolma v1.5 | Soldaini et al. (2023) | A three trillion token corpus that samples sources commonly used to train LMs in order to enable open research on pretraining data |

#### Fine-grained domain benchmarks

Where typical pretraining corpora offer at most tens of labeled domains usually based on where the data is sourced, we examine datasets with up to an order of magnitude more domains. Existing datasets (M2D2 and c4 100 Domains) and datasets we curate from Dolma v1.5 use metadata to define hundreds of domains over Wikipedia, Semantic Scholar, Common Crawl, Reddit, and Github data. These include diverse domains from *Culture and the arts: Performing arts*, a topic on Wikipedia, to *r/depression*, a forum on Reddit for mental health support.

| Source | Citation | Description |
|:---|:---|:---|
| M2D2 S2ORC | Reid et al (2022) | Papers from Semantic Scholar grouped by hierarchical academic field categories |
| M2D2 Wiki | Reid et al (2022) | Wikipedia articles grouped by hierarchical categories in the Wikipedia ontology |
| c4 100 Domains | Chronopoulou et al (2021) | Balanced samples of the top 100 URL domains in C4 |
| Dolma 100 Subreddits | Soldaini et al. (2023) | Balanced samples of the top 100 Subreddits from the Dolma Reddit subset |
| Dolma 100 Programming Languages | Kocetkov et al. (2022) via Soldaini et al. (2023) | Balanced samples of the top 100 programming languages from the Dolma Stack subset |

#### Disparities between speech communities

Some communities are known to be underserved by existing models. Following HELM, we measure disparities in performance on corpora of African American English and White aligned English from TwitterAAE, as well as nine corpora of English from different countries with the ICE dataset. Note that although the Paloma benchmark analysis in our paper describes results on ICE, we are not able to re-host this data.

| Source | Citation | Description |
|:---|:---|:---|
| ICE | Greenbaum and Nelson (1996) via Liang et al (2022) | English from around the world curated by local experts, with subsets for Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, and the USA |
| TwitterAAE | Blodgett et al. (2016) via Liang et al (2022) | Balanced sets of tweets classified as African American or White aligned English |

#### Fringe sources previously studied for problematic discourse

Text from some fringe online communities has been shown to contain larger proportions of hate speech and toxicity than more mainstream sources. [Longpre et al. (2023)](https://arxiv.org/abs/2305.13169) have shown that varying amounts of toxic content in pretraining data exhibit a tradeoff between non-toxic generation and ability to classify toxicity, indicating that model fit to discourse with toxicity is worth measuring. Measuring perplexity on Manosphere, Gab, and 4chan characterises model familiarity with distinct social contexts in which toxic language arises.

| Source | Citation | Description |
|:---|:---|:---|
| Manosphere Corpus | Ribeiro et al (2020) | 9 forums where a set of related masculinist ideologies developed over the 2000s and 2010s |
| Gab Corpus | Zannettou et al (2018) | Data from 2016-18 from an alt-right, free-speech-oriented social media platform shown to contain more hate speech than mainstream platforms |
| 4chan Corpus | Papasavva et al (2020) | Data from 2016-19 from a politics subforum of an anonymity-focused forum found to contain among the highest rates of toxic content |

#### Data Collection and Processing

The data in Paloma are sampled from existing sources. Most often perplexity evaluation data is subsampled uniformly over the original distribution of domains in a source, resulting in more or fewer tokens from each domain in the evaluation data based on how well represented they are in the corpus. We instead employ stratified sampling, in which all sources with marked domains are partitioned by domain and a uniform sample of the same size is taken from each partition. Specifically, documents are sampled from each domain until a target number of tokens is reached. This helps ensure that no domains are lost or very small after subsampling. In social media domains with additional metadata that is typically displayed along with posts, we format metadata such as timestamps into the document `'text'` field. Where information is available about how threads of posts are connected, documents in that domain contain all posts in a given thread. Additional details on source specific processing are available in our paper.

#### Who are the source data producers?

Text data from each of the sources curated in Paloma is created by varying sets of original authors. Some sources are collected from users of specific internet fora such as specific subreddits. Other data is collected on the basis of expert or automated classification of demographic groups. Other data is collected from authors of archival material including scientific preprints, Wikipedia, and code repositories. Lastly, data sampled from standard pretraining corpora comes from authors collected through automatic web scraping and large scale sampling of archival sources, making it difficult to recover much specific information about these authors.

#### Annotation process

No annotation is done on this data.

#### Who are the annotators?

No annotation is done on this data.

#### Personal and Sensitive Information

Sources in Paloma may contain personally identifiable information (PII). No attempt is made to measure or remove this information for the following reason: Paloma provides a small subsample of already publicly available data. The small size of this subsample renders this data less useful for aggregation of PII information than the already available public sources which we subsample.

## Bias, Risks, and Limitations

It is beyond the scope of any one group of researchers to prescribe an exhaustive set of domains that should be examined for an LM. Rather Paloma brings together a substantial selection of domains that are identifiable from already available metadata to demonstrate the kinds of analyses possible with hundreds of domains and rigorous experimental controls. Different research goals will motivate different definitions and selections of domains, but other researchers can apply the guidelines we detail in our paper to novel fine-grained domains suitable for their research questions. One of the key advantages of evaluating a model by its fit to a collection of text representing a domain is that such domains can be identified not just by researchers who study LMs. We hope future work will identify many more domains that no one discipline would think to look at.

In Paloma, we distinguish sources from domains, although not all cases permit such easy distinction. We use *source* to refer to a selection of data that is characterized by the decisions of the people who curated that data, whether that curation is automatic as in scraping C4 or manual as in selecting the subcorpora of The Pile. By contrast we use *domain* to refer to a set of documents that belong together because they are originally produced by a group of humans that share a distinct social context. Considered as such, domains may overlap; a document's author may belong to the set of English speakers in Jamaica and the set of AI researchers. Further note that domains are often latent categorizations which we only approximate because complete metadata does not exist. Also, some domains in Paloma appear in multiple sources, such as academic papers. Though The Pile and RedPajama process academic papers differently, the subcorpora on academic papers in each source represent different approximations of the same or very similar domains. However for the sake of simplicity, we make the reductive assumption of counting all 585 domains in Paloma as fully distinct.

### Recommendations

In our paper we outline guidelines for evaluating language model fit. We encourage users of Paloma to adopt these experimental controls for metric variance when subsampling, benchmark contamination, differing tokenization, training data order, and evaluation data format.

## Citation

**BibTeX:**

```
@article{paloma,
  title={{Paloma}: A Benchmark for Evaluating Language Model Fit},
  author={Magnusson, Ian and Bhagia, Akshita and Hofmann, Valentin and Soldaini, Luca and Harsh Jha, Ananya and Tafjord, Oyvind and Schwenk, Dustin and Walsh, Evan Pete and Elazar, Yanai and Lo, Kyle and Groeneveld, Dirk and Beltagy, Iz and Hajishirzi, Hannaneh and Smith, Noah A. and Richardson, Kyle and Dodge, Jesse},
  journal={technical report},
  year={2023},
  url={https://paloma.allen.ai/}
}
```

## Dataset Card Contact

{ianm,jessed}@allenai.org
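As a quick-start sketch (assuming you have accepted the gating terms above and authenticated with a Hugging Face token), each source can be loaded as its own config with `val` and `test` splits; config names such as `c4_en` or `wikitext_103` follow the listing in this repository:

```python
from datasets import load_dataset

# Each Paloma source is a separate config with "val" and "test" splits.
val = load_dataset("allenai/paloma", "c4_en", split="val")

# Every record carries at least a 'text' field, plus source-specific metadata.
print(val[0]["text"][:200])
```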
allenai/paloma
[ "arxiv:2312.10523", "arxiv:2305.13169", "region:us" ]
2023-11-28T23:51:50+00:00
{"extra_gated_prompt": "Access to this dataset is automatically granted upon accepting the [**AI2 ImpACT License \u2013 Low Risk Artifacts (\u201cLR Agreement\u201d)**](https://allenai.org/licenses/impact-lr) and completing all fields below. All data subsets in this dataset are licensed under the LR Agreement, except for those as listed in the 'License' section of the Dataset Card.", "extra_gated_fields": {"Your full name": "text", "Organization or entity you are affiliated with": "text", "State or country you are located in": "text", "Contact email": "text", "Please describe your intended use of the low risk artifact(s)": "text", "I AGREE to the terms and conditions of the LR Agreement above": "checkbox", "I AGREE to AI2\u2019s use of my information for legal notices and administrative matters": "checkbox", "I CERTIFY that the information I have provided is true and accurate": "checkbox"}, "dataset_info": [{"config_name": "4chan_meta_sep", "features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "added", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "original_ids", "sequence": "int64"}, {"name": "original_times", "sequence": "int64"}, {"name": "semantic_url", "dtype": "string"}, {"name": "truncated_portion", "dtype": "string"}]}]}, {"config_name": "c4_100_domains", "features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "added", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "subdomain", "dtype": "string"}]}, {"config_name": "c4_en", "features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "added", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "url", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "truncated_portion", "dtype": "string"}]}, {"config_name": "dolma-v1_5", "features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "added", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "subdomain", "dtype": "string"}, {"name": "metadata", "dtype": "struct"}]}, {"config_name": "dolma_100_programming_languages_no_attributes", "features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "added", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "subdomain", "dtype": "string"}, {"name": "metadata", "dtype": "struct"}, {"name": "timestamp", "dtype": "timestamp[s]"}]}]}], "configs": [{"config_name": "4chan_meta_sep", "data_files": [{"split": "val", "path": "4chan_meta_sep/val/*"}, {"split": "test", "path": "4chan_meta_sep/test/*"}]}, {"config_name": "c4_100_domains", "data_files": [{"split": "val", "path": "c4_100_domains/val/*"}, {"split": "test", "path": "c4_100_domains/test/*"}]}, {"config_name": "c4_en", "data_files": [{"split": "val", "path": "c4_en/val/*"}, {"split": "test", "path": "c4_en/test/*"}]}, {"config_name": "dolma-v1_5", "data_files": [{"split": "val", "path": "dolma-v1_5/val/*"}, {"split": "test", "path": "dolma-v1_5/test/*"}]}, {"config_name": "dolma_100_programming_languages_no_attributes", "data_files": [{"split": "val", "path": "dolma_100_programming_languages_no_attributes/val/*"}, {"split": "test", "path": "dolma_100_programming_languages_no_attributes/test/*"}]}, {"config_name": "dolma_100_subreddits", "data_files": [{"split": "val", "path": "dolma_100_subreddits/val/*"}, {"split": "test", "path": 
"dolma_100_subreddits/test/*"}]}, {"config_name": "falcon-refinedweb", "data_files": [{"split": "val", "path": "falcon-refinedweb/val/*"}, {"split": "test", "path": "falcon-refinedweb/test/*"}]}, {"config_name": "gab", "data_files": [{"split": "val", "path": "gab/val/*"}, {"split": "test", "path": "gab/test/*"}]}, {"config_name": "m2d2_s2orc_unsplit", "data_files": [{"split": "val", "path": "m2d2_s2orc_unsplit/val/*"}, {"split": "test", "path": "m2d2_s2orc_unsplit/test/*"}]}, {"config_name": "m2d2_wikipedia_unsplit", "data_files": [{"split": "val", "path": "m2d2_wikipedia_unsplit/val/*"}, {"split": "test", "path": "m2d2_wikipedia_unsplit/test/*"}]}, {"config_name": "manosphere_meta_sep", "data_files": [{"split": "val", "path": "manosphere_meta_sep/val/*"}, {"split": "test", "path": "manosphere_meta_sep/test/*"}]}, {"config_name": "mc4", "data_files": [{"split": "val", "path": "mc4/val/*"}, {"split": "test", "path": "mc4/test/*"}]}, {"config_name": "ptb", "data_files": [{"split": "val", "path": "ptb/val/*"}, {"split": "test", "path": "ptb/test/*"}]}, {"config_name": "redpajama", "data_files": [{"split": "val", "path": "redpajama/val/*"}, {"split": "test", "path": "redpajama/test/*"}]}, {"config_name": "twitterAAE_HELM_fixed", "data_files": [{"split": "val", "path": "twitterAAE_HELM_fixed/val/*"}, {"split": "test", "path": "twitterAAE_HELM_fixed/test/*"}]}, {"config_name": "wikitext_103", "data_files": [{"split": "val", "path": "wikitext_103/val/*"}, {"split": "test", "path": "wikitext_103/test/*"}]}]}
2023-12-20T21:55:08+00:00
[ "2312.10523", "2305.13169" ]
[]
TAGS #arxiv-2312.10523 #arxiv-2305.13169 #region-us
Dataset Card for Dataset Name ============================= Language models (LMs) commonly report perplexity on monolithic data held out from the training distribution. Implicitly or explicitly, this data is composed of domains—variations in the distribution of language. Rather than assuming perplexity on one distribution extrapolates to others, Perplexity Analysis for Language Model Assessment (Paloma) measures LM fit to 585 text domains, ranging from NY Times to r/depression on Reddit. Dataset Details --------------- ### Benchmark Inference and Submissions We invite submissions to our benchmark and organize results by comparability based on compliance with guidelines such as the removal of benchmark contamination from pretraining. Standardized inference code for running comprable evaluations and details about making submissions to the Paloma benchmark can be found at the following link. How to evaluate and how to submit ### Dataset Description Paloma is for examining relative differences in LM fit on domains. We take these relative differences as a proxy of model fit to the shared knowledge, values, and social context that position the humans producing language in a domain. While we expect contemporary LMs to have a limited fit to the most complex of these latent factors of domains, improving fit to all factors is necessary both to improve perplexity and for any actual use of the LM. For example, better perplexity on a particular dialect of English suggests that that model will make a better chatbot for people that speak that dialect. The sources of evaluation data in Paloma were selected based on the following desiderata: 1) including known resources, 2) including fine-grained domains, 3) including domains representing specific communities of interest. Different lines of research will require different selections of domains; Paloma aims to enable research on differences in LM fit over the hundreds of domains that are readily available in existing metadata. Note that we are not able to re-host 2 of the 18 sources in Paloma comprising 39 domains. These are The Pile and ICE. The ICE corpus is available on request to the original authors following the instructions here. Curated by: Ian Magnusson, Akshita Bhagia, Valentin Hofmann, Luca Soldaini, Ananya Harsh Jha, Oyvind Tafjord, Dustin Schwenk, Evan Pete Walsh, Yanai Elazar, Kyle Lo, Dirk Groeneveld, Iz Beltagy, Hannaneh Hajishirzi, Noah A. Smith, Kyle Richardson, and Jesse Dodge Languages: We elect to focus just on the language modeling of English and code data. License: The data subsets are licensed under the AI2 ImpACT License - Low Risk Artifacts, except as listed below. * Wikitext-103 - CC BY-SA * TwitterAAE - for research purposes only * Red Pajama - see license details * M2D2 - CC BY-NC Paper: URL ### Dataset Sources * Code * Paloma 1B Baseline Models: Dolma, Pile, RedPajama, C4, mC4-en, Falcon-RefinedWeb Uses ---- This benchmark is intended for use in evaluating language model fit to fine-grained domains. ### Direct Use This dataset should be used for evaluating the likilihood of text from a given domain by a language model. ### Out-of-Scope Use Note that the sources contained in this benchmark include varying licenses with differing restrictions (see License) Dataset Structure ----------------- The sources in this dataset are each organized into their own subcorpus. This consists of a 'val' and 'test' split. 
Data within this is organized as files of line-separated JSON data where each line represents a document and its associated metadata. The type of metadata available varies from source to source, but each line contains at least a field ''text'' which contains the text of the document. Dataset Creation ---------------- ### Curation Rationale Perplexity is conventionally reported on held out data from a model's training distribution or a small number of traditional test sets. Such monolithic evaluation ignores potential variation of model fit across different domains that LMs implicitly learn to model. We curate sources of fine-grained textual domains in Paloma to enable evaluation of language model fit to specific domains of text. Paloma is inspired by and incorporates previous work that curates corpora with marked domains (The Pile, M2D2, C4 100 Domains, ICE, TwitterAAE). We conduct a stratified subsample over domains where we set a minimum subsample size based on empirical estimation of the variance over subsamples. ### Source Data #### Standard language modeling benchmarks Though it is common practice to evaluate on held out data from the pretraining corpus of a given model, we evaluate *across* several major pretraining corpora and standard language modeling benchmarks. We also break down performance per domain within the datasets that have multiple domains. Note that although the Paloma benchmark analysis in our paper describes results on the Pile, we are not able to re-host this data. Source: c4-en, Citation: Raffel et al (2019) via Dodge et al (2021), Description: Standard contemporary LM pretraining corpus automatically filtered from the April 2019 Common Crawl scrape Source: mc4-en, Citation: Xue et al (2021), Description: The English language portion of a pretraining corpus automatically filtered from 71 Common Crawl scrapes Source: Pile, Citation: Gao et al (2020), Description: Standard contemporary LM benchmark from curated multi-source data including large-scale non-webscraped sources Source: Wikitext-103, Citation: Merity et al (2016), Description: A standard collection of verified “Good” and “Featured” articles on Wikipedia Source: Penn Tree Bank, Citation: Marcus et al (1999) via Nunes, Davide. (2020), Description: Classic Wall Street Journal benchmark with linguistic structure annotations omitted Source: RedPajama, Citation: Together Computer (2023), Description: A publicly available reproduction of the LLaMA (Touvron et al., 2023) pretraining source mixture, combining large amounts of webscraped text with smaller curated sources Source: Falcon-RefinedWeb, Citation: Penedo et al. (2023), Description: A corpus of English sampled from all Common Crawl scrapes until June 2023, more aggressively filtered and deduplicated than c4 and mc4-en Source: Dolma v1.5, Citation: Soldaini et al. (2023), Description: A three trillion token corpus that samples sources commonly used to train LMs in order to enable open research on pretraining data #### Fine-grained domain benchmarks Where typical pretraining corpora offer at most tens of labeled domains usually based on where the data is sourced, we examine datasets with up to an order of magnitude more domains. Existing datasets (M2D2 and c4 100 Domains) and datasets we curate from Dolma v1.5 use metadata to define hundreds of domains over Wikipedia, Semantic Scholar, Common Crawl, Reddit, and Github data. 
These include diverse domains from *Culture and the arts: Performing arts*, a topic on Wikipedia, to *r/depression*, a forum on Reddit for mental health support. Source: M2D2 S2ORC, Citation: Reid et al (2022), Description: Papers from Semantic Scholar grouped by hierarchical academic field categories Source: M2D2 Wiki, Citation: Reid et al (2022), Description: Wikipedia articles grouped by hierarchical categories in the Wikipedia ontology Source: c4 100 Domains, Citation: Chronopoulou et al (2021), Description: Balanced samples of the top 100 URL domains in C4 Source: Dolma 100 Subreddits, Citation: Soldaini et al. (2023), Description: Balanced samples of the top 100 Subreddits from the Dolma Reddit subset Source: Dolma 100 Programming Languages, Citation: Kocetkov et al. (2022) via Soldaini et al. (2023), Description: Balanced samples of the top 100 programming languages from the Dolma Stack subset #### Disparities between speech communities Some communities are known to be underserved by existing models. Following HELM, we measure disparities in performance on corpora of African American English and White aligned English from TwitterAAE, as well as nine corpora of English from different countries with the ICE dataset. Note that although the Paloma benchmark analysis in our paper describes results on ICE, we are not able to re-host this data. Source: ICE, Citation: Greenbaum and Nelson (1996) via Liang et al (2022), Description: English from around the world curated by local experts, with subsets for Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, and the USA Source: TwitterAAE, Citation: Blodgett et al. (2016) via Liang et al (2022), Description: Balanced sets of tweets classified as African American or White aligned English #### Fringe sources previously studied for problematic discourse Text from some fringe online communities has been shown to contain larger proportions of hate speech and toxicity than more mainstream sources. Longpre et al. (2023) have shown that varying amounts of toxic content in pretraining data exhibit a tradeoff between non-toxic generation and ability to classify toxicity, indicating that model fit to discourse with toxicity is worth measuring. Measuring perplexity on Manosphere, Gab, and 4chan characterises model familiarity with distinct social contexts in which toxic language arises. Source: Manosphere Corpus, Citation: Ribeiro et al (2020), Description: 9 forums where a set of related masculinist ideologies developed over the 2000s and 2010s Source: Gab Corpus, Citation: Zannettou et al (2018), Description: Data from 2016-18 from an alt-right, free-speech-oriented social media platform shown to contain more hate speech than mainstream platforms Source: 4chan Corpus, Citation: Papasavva et al (2020), Description: Data from 2016-19 from a politics subforum of an anonymity-focused forum found to contain among the highest rates of toxic content #### Data Collection and Processing The data in Paloma are sampled from existing sources. Most often, perplexity evaluation data is subsampled uniformly over the original distribution of domains in a source, resulting in more or fewer tokens from each domain in the evaluation data based on how well represented they are in the corpus. We instead employ stratified sampling, in which all sources with marked domains are partitioned by domain and a uniform sample of the same size is taken from each partition. 
Specifically, documents are sampled from each domain until a target number of tokens is reached. This helps ensure that no domains are lost or very small after subsampling. In social media domains with additional metadata that is typically displayed along with posts, we format metadata such as timestamps into the document ''text'' field. Where information is available about how threads of posts are connected, documents in that domain contain all posts in a given thread. Additional details on source-specific processing are available in our paper. #### Who are the source data producers? Text data from each of the sources curated in Paloma is created by varying sets of original authors. Some sources are collected from users of specific internet fora such as specific subreddits. Other data is collected on the basis of expert or automated classification of demographic groups. Other data is collected from authors of archival material including scientific preprints, Wikipedia, and code repositories. Lastly, data sampled from standard pretraining corpora comes from authors collected through automatic web scraping and large-scale sampling of archival sources, making it difficult to recover much specific information about these authors. #### Annotation process No annotation is done on this data. #### Who are the annotators? No annotation is done on this data. #### Personal and Sensitive Information Sources in Paloma may contain personally identifiable information (PII). No attempt is made to measure or remove this information for the following reason: Paloma provides a small subsample of already publicly available data. The small size of this subsample renders this data less useful for aggregation of PII information than the already available public sources which we subsample. Bias, Risks, and Limitations ---------------------------- It is beyond the scope of any one group of researchers to prescribe an exhaustive set of domains that should be examined for an LM. Rather, Paloma brings together a substantial selection of domains that are identifiable from already available metadata to demonstrate the kinds of analyses possible with hundreds of domains and rigorous experimental controls. Different research goals will motivate different definitions and selections of domains, but other researchers can apply the guidelines we detail in our paper to novel fine-grained domains suitable for their research questions. One of the key advantages of evaluating a model by its fit to a collection of text representing a domain is that such domains can be identified not just by researchers who study LMs. We hope future work will identify many more domains that no one discipline would think to look at. In Paloma, we distinguish sources from domains, although not all cases permit such easy distinction. We use *source* to refer to a selection of data that is characterized by the decisions of the people who curated that data, whether that curation is automatic as in scraping C4 or manual as in selecting the subcorpora of The Pile. By contrast, we use *domain* to refer to a set of documents that belong together because they are originally produced by a group of humans that share a distinct social context. Considered as such, domains may overlap; a document's author may belong to the set of English speakers in Jamaica and the set of AI researchers. 
Also, some domains in Paloma appear in multiple sources, such as academic papers. Though The Pile and RedPajama process academic papers differently, the subcorpora on academic papers in each source represent different approximations of the same or very similar domains. However for the sake of simplicity, we make the reductive assumption of counting all 585 domains in Paloma as fully distinct. ### Recommendations In our paper we outline guidelines for evaluating language model fit. We encourage users of Paloma to adopt these experimental controls for metric variance when subsampling, benchmark contamination, differing tokenization, training data order, and evaluation data format. BibTeX: Dataset Card Contact -------------------- {ianm,jessed}@URL
[ "### Benchmark Inference and Submissions\n\n\nWe invite submissions to our benchmark and organize results by comparability based on compliance with guidelines such as the removal of benchmark contamination from pretraining. Standardized inference code for running comprable evaluations and details about making submissions to the Paloma benchmark can be found at the following link.\n\n\nHow to evaluate and how to submit", "### Dataset Description\n\n\nPaloma is for examining relative differences in LM fit on domains. We take these relative differences as a proxy of model fit to the shared knowledge, values, and social context that position the humans producing language in a domain. While we expect contemporary LMs to have a limited fit to the most complex of these latent factors of domains, improving fit to all factors is necessary both to improve perplexity and for any actual use of the LM. For example, better perplexity on a particular dialect of English suggests that that model will make a better chatbot for people that speak that dialect.\n\n\nThe sources of evaluation data in Paloma were selected based on the following desiderata: 1) including known resources, 2) including fine-grained domains, 3) including domains representing specific communities of interest. Different lines of research will require different selections of domains; Paloma aims to enable research on differences in LM fit over the hundreds of domains that are readily available in existing metadata.\n\n\nNote that we are not able to re-host 2 of the 18 sources in Paloma comprising 39 domains. These are The Pile and ICE. The ICE corpus is available on request to the original authors following the instructions here.\n\n\nCurated by: Ian Magnusson, Akshita Bhagia, Valentin Hofmann, Luca Soldaini, Ananya Harsh Jha, Oyvind Tafjord, Dustin Schwenk, Evan Pete Walsh, Yanai Elazar, Kyle Lo, Dirk Groeneveld, Iz Beltagy, Hannaneh Hajishirzi, Noah A. Smith, Kyle Richardson, and Jesse Dodge\n\n\nLanguages: We elect to focus just on the language modeling of English and code data.\n\n\nLicense: The data subsets are licensed under the AI2 ImpACT License - Low Risk Artifacts, except as listed below.\n\n\n* Wikitext-103 - CC BY-SA\n* TwitterAAE - for research purposes only\n* Red Pajama - see license details\n* M2D2 - CC BY-NC\n\n\nPaper: URL", "### Dataset Sources\n\n\n* Code\n* Paloma 1B Baseline Models: Dolma, Pile, RedPajama, C4, mC4-en, Falcon-RefinedWeb\n\n\nUses\n----\n\n\nThis benchmark is intended for use in evaluating language model fit to fine-grained domains.", "### Direct Use\n\n\nThis dataset should be used for evaluating the likilihood of text from a given domain by a language model.", "### Out-of-Scope Use\n\n\nNote that the sources contained in this benchmark include varying licenses with differing restrictions (see License)\n\n\nDataset Structure\n-----------------\n\n\nThe sources in this dataset are each organized into their own subcorpus. This consists of a 'val' and 'test' split. Data within this is organized as files with lines separated JSON data where each line represents a document and its associated metadata. The type of metadata available varies from source to source, but each line contains at least a field ''text'' which contains the text of the document.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nPerplexity is conventionally reported on held out data from a model's training distribution or a small number of traditional test sets. 
Such monolithic evaluation ignores potential variation of model fit across different domains that LMs implicitly learn to model. We curate sources of fine-grained textual domains in Paloma to enable evaluation of language model fit to specific domains of text. Paloma is inspired by and incorporates previous work that curates corpora with marked domains (The Pile, M2D2, C4 100 Domains, ICE, TwitterAAE). We conduct a stratified subsample over domains where we set a minimum subsample size based on emperical estimation of the variance over subsamples.", "### Source Data", "#### Standard language modeling benchmarks\n\n\nThough it is common practice to evaluate on held out data from the pretraining corpus of a given model, we evaluate *across* several major pretraining corpora and standard language modeling benchmarks. We also break down performance per domain within the datasets that have multiple domains. Note that although the Paloma benchmark analysis in our paper describes results on the Pile, we are not able to re-host this data.\n\n\nSource: c4-en, Citation: Raffel et al (2019) via Dodge et al (2021), Description: Standard contemporary LM pretraining corpus automatically filtered from the April 2019 Common Crawl scrape\nSource: mc4-en, Citation: Xue et al (2021), Description: The English language portion of a pretraining corpus automatically filtered from 71 Common Crawl scrapes\nSource: Pile, Citation: Gao et al (2020), Description: Standard contemporary LM benchmark from curated multi-source data including large scale non-webscraped sources\nSource: Wikitext-103, Citation: Merity et al (2016), Description: A standard collection of verified “Good” and “Featured” articles on Wikipedia\nSource: Penn Tree Bank, Citation: Marcus et al (1999) via Nunes, Davide. (2020), Description: Classic Wall Street Journal benchmark with linguistic structure annotations omitted\nSource: RedPajama, Citation: Together Computer (2023), Description: A publicly available reproduction of the LLaMA (Touvron et al., 2023) pretraining source mixture, combining large amounts of webscraped text with smaller curated sources\nSource: Falcon-RefinedWeb, Citation: Penedo et al. (2023), Description: A corpus of English sampled from all Common Crawl scrapes until June 2023, more aggressively filtered and deduplicated than c4 and mc4-en\nSource: Dolma v1.5, Citation: Soldaini et al. (2023), Description: A three trillion token corpus that samples sources commonly used to train LMs in order to enable open research on pretraining data", "#### Fine-grained domain benchmarks\n\n\nWhere typical pretraining corpora offer at most tens of labeled domains usually based on where the data is sourced, we examine datasets with up to an order of magnitude more domains. Existing datasets (M2D2 and c4 100 Domains) and datasets we curate from Dolma v1.5 use metadata to define hundreds of domains over Wikipedia, Semantic Scholar, Common Crawl, Reddit, and Github data. 
These include diverse domains from *Culture and the arts: Performing arts*, a topic on Wikipedia, to *r/depression*, a forum on Reddit for mental health support.\n\n\nSource: M2D2 S2ORC, Citation: Reid et al (2022), Description: Papers from Semantic Scholar grouped by hierarchical academic field categories\nSource: M2D2 Wiki, Citation: Reid et al (2022), Description: Wikipedia articles grouped by hierarchical categories in the Wikipedia ontology\nSource: c4 100 Domains, Citation: Chronopoulou et al (2021), Description: Balanced samples of the top 100 URL domains in C4\nSource: Dolma 100 Subreddits, Citation: Soldaini et al. (2023), Description: Balanced samples of the top 100 Subreddits from the Dolma Reddit subset\nSource: Dolma 100 Programming Languages, Citation: Kocetkov et al. (2022) via Soldaini et al. (2023), Description: Balanced samples of the top 100 programming languages from the Dolma Stack subset", "#### Disparities between speech communities\n\n\nSome communities are known to be underserved by existing models. Following HELM, We measure disparities in performance on corpora of African American English and White aligned English from TwitterAAE, as well as nine corpora of English from different countries with the ICE dataset. Note that although the Paloma benchmark analysis in our paper describes results on ICE, we are not able to re-host this data.\n\n\nSource: ICE, Citation: Greenbaum and Nelson (1996) via Liang et al (2022), Description: English from around the world curated by local experts, with subsets for Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, and the USA\nSource: TwitterAAE, Citation: Blodgett et al. (2016) via Liang et al (2022), Description: Balanced sets of tweets classified as African American or White aligned English", "#### Fringe sources previously studied for problematic discourse\n\n\nText from some fringe online communities has been shown to contain larger proportions of hate speech and toxicity than more mainstream sources. Longpre et al. (2023) have shown that varying amount of toxic content in pretraining data exhibits a tradeoff between non-toxic generation and ability to classify toxicity, indicating that model fit to discourse with toxicity is worth measuring. Measuring perplexity on Manosphere, Gab, and 4chan characterises model familiarity with distinct social contexts in which toxic language arises.\n\n\nSource: Manosphere Corpus, Citation: Ribeiro et al (2020), Description: 9 forums where a set of related masculinist ideologies developed over the 2000s and 2010s\nSource: Gab Corpus, Citation: Zannettou et al (2018), Description: Data from 2016-18 from an alt-right, free-speech-oriented social media platform shown to contain more hate speech than mainstream platforms\nSource: 4chan Corpus, Citation: Papasavva et al (2020), Description: Data from 2016-19 from a politics subforum of an anonymity-focused forum found to contain among the highest rates of toxic content", "#### Data Collection and Processing\n\n\nThe data in Paloma are sampled from existing sources. Most often perplexity evaluation data is subsampled uniformly over the original distribution of domains in a source, resulting in more or less tokens from each domain in the evaluation data based on how well represented they are in the corpus. We instead employ stratified sampling, in which all sources with marked domains are partitioned by domain and a uniform sample of the same size is taken from each partition. 
Specifically, documents are sampled from each domain until a target number of tokens is reached. This helps ensure that no domains are lost or very small after subsampling.\n\n\nIn social media domains with additional metadata that is typically displayed along with posts, we format metadata such as timestamps into the document ''text'' field. Where information is available about how threads of posts are connected, documents in that domain contain all posts in a given thread.\n\n\nAdditional details on source specific processing are available in our paper.", "#### Who are the source data producers?\n\n\nText data from each of the sources curated in Paloma is created by varying sets of original authors. Some sources are collected from users of specific internet fora such as specific subreddits. Other data is collected on the basis of expert or automated classification of demographic groups. Other data is collected from authors of archival material including scientific preprints, Wikipedia, and code repositories. Lastly, data sampled from standard pretraining corpora comes from authors collected through automatic webscrapping and large scale sampling of archival sources, making it difficult to recover much specific information about these authors.", "#### Annotation process\n\n\nNo annotation is done on this data.", "#### Who are the annotators?\n\n\nNo annotation is done on this data.", "#### Personal and Sensitive Information\n\n\nSources in Paloma may contain personally identifiable information (PII). No attempt is made to measure or remove this information for the following reason: Paloma provides a small subsample of already publicly available data. The small size of this subsample renders this data less useful for aggregation of PII information than the already available public sources which we subsample.\n\n\nBias, Risks, and Limitations\n----------------------------\n\n\nIt is beyond the scope of any one group of researchers to prescribe an exhaustive set of domains that should be examined for a LM. Rather Paloma brings together a substantial selection of domains that are identifiable from already available metadata to demonstrate the kinds of analyses possible with hundreds of domains and rigorous experimental controls.\nDifferent research goals will motivate different definitions and selections of domains, but other researchers can apply the guidelines we detail in our paper to novel fine-grained domains suitable for their research questions. One of the key advantages of evaluating a model by its fit to a collection of text representing a domain is that such domains can be identified not just by researchers who study LMs. We hope future work will identify many more domains that no one discipline would think to look at.\n\n\nIn Paloma, we distinguish sources from domains, although not all cases permit such easy distinction. We use *source* to refer to a selection of data that is characterized by the decisions of the people who curated that data, whether that curation is automatic as in scraping C4 or manual as in selecting the subcorpora of the The Pile. By contrast we use *domain* to refer to a set of documents that belong together because they are originally produced by a group of humans that share a distinct social context. Considered as such, domains may overlap; a document's author may belong to the set of English speakers in Jamaica and the set of AI researchers. 
Further note, that domains are often latent categorizations which we only approximate because complete metadata does not exist.\n\n\nAlso, some domains in Paloma appear in multiple sources, such as academic papers. Though The Pile and RedPajama process academic papers differently, the subcorpora on academic papers in each source represent different approximations of the same or very similar domains. However for the sake of simplicity, we make the reductive assumption of counting all 585 domains in Paloma as fully distinct.", "### Recommendations\n\n\nIn our paper we outline guidelines for evaluating language model fit. We encourage users of Paloma to adopt these experimental controls for metric variance when subsampling, benchmark contamination, differing tokenization, training data order, and evaluation data format.\n\n\nBibTeX:\n\n\nDataset Card Contact\n--------------------\n\n\n{ianm,jessed}@URL" ]
[ "TAGS\n#arxiv-2312.10523 #arxiv-2305.13169 #region-us \n", "### Benchmark Inference and Submissions\n\n\nWe invite submissions to our benchmark and organize results by comparability based on compliance with guidelines such as the removal of benchmark contamination from pretraining. Standardized inference code for running comprable evaluations and details about making submissions to the Paloma benchmark can be found at the following link.\n\n\nHow to evaluate and how to submit", "### Dataset Description\n\n\nPaloma is for examining relative differences in LM fit on domains. We take these relative differences as a proxy of model fit to the shared knowledge, values, and social context that position the humans producing language in a domain. While we expect contemporary LMs to have a limited fit to the most complex of these latent factors of domains, improving fit to all factors is necessary both to improve perplexity and for any actual use of the LM. For example, better perplexity on a particular dialect of English suggests that that model will make a better chatbot for people that speak that dialect.\n\n\nThe sources of evaluation data in Paloma were selected based on the following desiderata: 1) including known resources, 2) including fine-grained domains, 3) including domains representing specific communities of interest. Different lines of research will require different selections of domains; Paloma aims to enable research on differences in LM fit over the hundreds of domains that are readily available in existing metadata.\n\n\nNote that we are not able to re-host 2 of the 18 sources in Paloma comprising 39 domains. These are The Pile and ICE. The ICE corpus is available on request to the original authors following the instructions here.\n\n\nCurated by: Ian Magnusson, Akshita Bhagia, Valentin Hofmann, Luca Soldaini, Ananya Harsh Jha, Oyvind Tafjord, Dustin Schwenk, Evan Pete Walsh, Yanai Elazar, Kyle Lo, Dirk Groeneveld, Iz Beltagy, Hannaneh Hajishirzi, Noah A. Smith, Kyle Richardson, and Jesse Dodge\n\n\nLanguages: We elect to focus just on the language modeling of English and code data.\n\n\nLicense: The data subsets are licensed under the AI2 ImpACT License - Low Risk Artifacts, except as listed below.\n\n\n* Wikitext-103 - CC BY-SA\n* TwitterAAE - for research purposes only\n* Red Pajama - see license details\n* M2D2 - CC BY-NC\n\n\nPaper: URL", "### Dataset Sources\n\n\n* Code\n* Paloma 1B Baseline Models: Dolma, Pile, RedPajama, C4, mC4-en, Falcon-RefinedWeb\n\n\nUses\n----\n\n\nThis benchmark is intended for use in evaluating language model fit to fine-grained domains.", "### Direct Use\n\n\nThis dataset should be used for evaluating the likilihood of text from a given domain by a language model.", "### Out-of-Scope Use\n\n\nNote that the sources contained in this benchmark include varying licenses with differing restrictions (see License)\n\n\nDataset Structure\n-----------------\n\n\nThe sources in this dataset are each organized into their own subcorpus. This consists of a 'val' and 'test' split. Data within this is organized as files with lines separated JSON data where each line represents a document and its associated metadata. 
The type of metadata available varies from source to source, but each line contains at least a field ''text'' which contains the text of the document.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nPerplexity is conventionally reported on held out data from a model's training distribution or a small number of traditional test sets. Such monolithic evaluation ignores potential variation of model fit across different domains that LMs implicitly learn to model. We curate sources of fine-grained textual domains in Paloma to enable evaluation of language model fit to specific domains of text. Paloma is inspired by and incorporates previous work that curates corpora with marked domains (The Pile, M2D2, C4 100 Domains, ICE, TwitterAAE). We conduct a stratified subsample over domains where we set a minimum subsample size based on emperical estimation of the variance over subsamples.", "### Source Data", "#### Standard language modeling benchmarks\n\n\nThough it is common practice to evaluate on held out data from the pretraining corpus of a given model, we evaluate *across* several major pretraining corpora and standard language modeling benchmarks. We also break down performance per domain within the datasets that have multiple domains. Note that although the Paloma benchmark analysis in our paper describes results on the Pile, we are not able to re-host this data.\n\n\nSource: c4-en, Citation: Raffel et al (2019) via Dodge et al (2021), Description: Standard contemporary LM pretraining corpus automatically filtered from the April 2019 Common Crawl scrape\nSource: mc4-en, Citation: Xue et al (2021), Description: The English language portion of a pretraining corpus automatically filtered from 71 Common Crawl scrapes\nSource: Pile, Citation: Gao et al (2020), Description: Standard contemporary LM benchmark from curated multi-source data including large scale non-webscraped sources\nSource: Wikitext-103, Citation: Merity et al (2016), Description: A standard collection of verified “Good” and “Featured” articles on Wikipedia\nSource: Penn Tree Bank, Citation: Marcus et al (1999) via Nunes, Davide. (2020), Description: Classic Wall Street Journal benchmark with linguistic structure annotations omitted\nSource: RedPajama, Citation: Together Computer (2023), Description: A publicly available reproduction of the LLaMA (Touvron et al., 2023) pretraining source mixture, combining large amounts of webscraped text with smaller curated sources\nSource: Falcon-RefinedWeb, Citation: Penedo et al. (2023), Description: A corpus of English sampled from all Common Crawl scrapes until June 2023, more aggressively filtered and deduplicated than c4 and mc4-en\nSource: Dolma v1.5, Citation: Soldaini et al. (2023), Description: A three trillion token corpus that samples sources commonly used to train LMs in order to enable open research on pretraining data", "#### Fine-grained domain benchmarks\n\n\nWhere typical pretraining corpora offer at most tens of labeled domains usually based on where the data is sourced, we examine datasets with up to an order of magnitude more domains. Existing datasets (M2D2 and c4 100 Domains) and datasets we curate from Dolma v1.5 use metadata to define hundreds of domains over Wikipedia, Semantic Scholar, Common Crawl, Reddit, and Github data. 
These include diverse domains from *Culture and the arts: Performing arts*, a topic on Wikipedia, to *r/depression*, a forum on Reddit for mental health support.\n\n\nSource: M2D2 S2ORC, Citation: Reid et al (2022), Description: Papers from Semantic Scholar grouped by hierarchical academic field categories\nSource: M2D2 Wiki, Citation: Reid et al (2022), Description: Wikipedia articles grouped by hierarchical categories in the Wikipedia ontology\nSource: c4 100 Domains, Citation: Chronopoulou et al (2021), Description: Balanced samples of the top 100 URL domains in C4\nSource: Dolma 100 Subreddits, Citation: Soldaini et al. (2023), Description: Balanced samples of the top 100 Subreddits from the Dolma Reddit subset\nSource: Dolma 100 Programming Languages, Citation: Kocetkov et al. (2022) via Soldaini et al. (2023), Description: Balanced samples of the top 100 programming languages from the Dolma Stack subset", "#### Disparities between speech communities\n\n\nSome communities are known to be underserved by existing models. Following HELM, We measure disparities in performance on corpora of African American English and White aligned English from TwitterAAE, as well as nine corpora of English from different countries with the ICE dataset. Note that although the Paloma benchmark analysis in our paper describes results on ICE, we are not able to re-host this data.\n\n\nSource: ICE, Citation: Greenbaum and Nelson (1996) via Liang et al (2022), Description: English from around the world curated by local experts, with subsets for Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, and the USA\nSource: TwitterAAE, Citation: Blodgett et al. (2016) via Liang et al (2022), Description: Balanced sets of tweets classified as African American or White aligned English", "#### Fringe sources previously studied for problematic discourse\n\n\nText from some fringe online communities has been shown to contain larger proportions of hate speech and toxicity than more mainstream sources. Longpre et al. (2023) have shown that varying amount of toxic content in pretraining data exhibits a tradeoff between non-toxic generation and ability to classify toxicity, indicating that model fit to discourse with toxicity is worth measuring. Measuring perplexity on Manosphere, Gab, and 4chan characterises model familiarity with distinct social contexts in which toxic language arises.\n\n\nSource: Manosphere Corpus, Citation: Ribeiro et al (2020), Description: 9 forums where a set of related masculinist ideologies developed over the 2000s and 2010s\nSource: Gab Corpus, Citation: Zannettou et al (2018), Description: Data from 2016-18 from an alt-right, free-speech-oriented social media platform shown to contain more hate speech than mainstream platforms\nSource: 4chan Corpus, Citation: Papasavva et al (2020), Description: Data from 2016-19 from a politics subforum of an anonymity-focused forum found to contain among the highest rates of toxic content", "#### Data Collection and Processing\n\n\nThe data in Paloma are sampled from existing sources. Most often perplexity evaluation data is subsampled uniformly over the original distribution of domains in a source, resulting in more or less tokens from each domain in the evaluation data based on how well represented they are in the corpus. We instead employ stratified sampling, in which all sources with marked domains are partitioned by domain and a uniform sample of the same size is taken from each partition. 
Specifically, documents are sampled from each domain until a target number of tokens is reached. This helps ensure that no domains are lost or very small after subsampling.\n\n\nIn social media domains with additional metadata that is typically displayed along with posts, we format metadata such as timestamps into the document ''text'' field. Where information is available about how threads of posts are connected, documents in that domain contain all posts in a given thread.\n\n\nAdditional details on source specific processing are available in our paper.", "#### Who are the source data producers?\n\n\nText data from each of the sources curated in Paloma is created by varying sets of original authors. Some sources are collected from users of specific internet fora such as specific subreddits. Other data is collected on the basis of expert or automated classification of demographic groups. Other data is collected from authors of archival material including scientific preprints, Wikipedia, and code repositories. Lastly, data sampled from standard pretraining corpora comes from authors collected through automatic webscrapping and large scale sampling of archival sources, making it difficult to recover much specific information about these authors.", "#### Annotation process\n\n\nNo annotation is done on this data.", "#### Who are the annotators?\n\n\nNo annotation is done on this data.", "#### Personal and Sensitive Information\n\n\nSources in Paloma may contain personally identifiable information (PII). No attempt is made to measure or remove this information for the following reason: Paloma provides a small subsample of already publicly available data. The small size of this subsample renders this data less useful for aggregation of PII information than the already available public sources which we subsample.\n\n\nBias, Risks, and Limitations\n----------------------------\n\n\nIt is beyond the scope of any one group of researchers to prescribe an exhaustive set of domains that should be examined for a LM. Rather Paloma brings together a substantial selection of domains that are identifiable from already available metadata to demonstrate the kinds of analyses possible with hundreds of domains and rigorous experimental controls.\nDifferent research goals will motivate different definitions and selections of domains, but other researchers can apply the guidelines we detail in our paper to novel fine-grained domains suitable for their research questions. One of the key advantages of evaluating a model by its fit to a collection of text representing a domain is that such domains can be identified not just by researchers who study LMs. We hope future work will identify many more domains that no one discipline would think to look at.\n\n\nIn Paloma, we distinguish sources from domains, although not all cases permit such easy distinction. We use *source* to refer to a selection of data that is characterized by the decisions of the people who curated that data, whether that curation is automatic as in scraping C4 or manual as in selecting the subcorpora of the The Pile. By contrast we use *domain* to refer to a set of documents that belong together because they are originally produced by a group of humans that share a distinct social context. Considered as such, domains may overlap; a document's author may belong to the set of English speakers in Jamaica and the set of AI researchers. 
Further note, that domains are often latent categorizations which we only approximate because complete metadata does not exist.\n\n\nAlso, some domains in Paloma appear in multiple sources, such as academic papers. Though The Pile and RedPajama process academic papers differently, the subcorpora on academic papers in each source represent different approximations of the same or very similar domains. However for the sake of simplicity, we make the reductive assumption of counting all 585 domains in Paloma as fully distinct.", "### Recommendations\n\n\nIn our paper we outline guidelines for evaluating language model fit. We encourage users of Paloma to adopt these experimental controls for metric variance when subsampling, benchmark contamination, differing tokenization, training data order, and evaluation data format.\n\n\nBibTeX:\n\n\nDataset Card Contact\n--------------------\n\n\n{ianm,jessed}@URL" ]
[ 24, 81, 453, 65, 28, 138, 169, 4, 470, 358, 200, 264, 222, 146, 14, 18, 550, 82 ]
[ "passage: TAGS\n#arxiv-2312.10523 #arxiv-2305.13169 #region-us \n### Benchmark Inference and Submissions\n\n\nWe invite submissions to our benchmark and organize results by comparability based on compliance with guidelines such as the removal of benchmark contamination from pretraining. Standardized inference code for running comprable evaluations and details about making submissions to the Paloma benchmark can be found at the following link.\n\n\nHow to evaluate and how to submit", "passage: ### Dataset Description\n\n\nPaloma is for examining relative differences in LM fit on domains. We take these relative differences as a proxy of model fit to the shared knowledge, values, and social context that position the humans producing language in a domain. While we expect contemporary LMs to have a limited fit to the most complex of these latent factors of domains, improving fit to all factors is necessary both to improve perplexity and for any actual use of the LM. For example, better perplexity on a particular dialect of English suggests that that model will make a better chatbot for people that speak that dialect.\n\n\nThe sources of evaluation data in Paloma were selected based on the following desiderata: 1) including known resources, 2) including fine-grained domains, 3) including domains representing specific communities of interest. Different lines of research will require different selections of domains; Paloma aims to enable research on differences in LM fit over the hundreds of domains that are readily available in existing metadata.\n\n\nNote that we are not able to re-host 2 of the 18 sources in Paloma comprising 39 domains. These are The Pile and ICE. The ICE corpus is available on request to the original authors following the instructions here.\n\n\nCurated by: Ian Magnusson, Akshita Bhagia, Valentin Hofmann, Luca Soldaini, Ananya Harsh Jha, Oyvind Tafjord, Dustin Schwenk, Evan Pete Walsh, Yanai Elazar, Kyle Lo, Dirk Groeneveld, Iz Beltagy, Hannaneh Hajishirzi, Noah A. Smith, Kyle Richardson, and Jesse Dodge\n\n\nLanguages: We elect to focus just on the language modeling of English and code data.\n\n\nLicense: The data subsets are licensed under the AI2 ImpACT License - Low Risk Artifacts, except as listed below.\n\n\n* Wikitext-103 - CC BY-SA\n* TwitterAAE - for research purposes only\n* Red Pajama - see license details\n* M2D2 - CC BY-NC\n\n\nPaper: URL### Dataset Sources\n\n\n* Code\n* Paloma 1B Baseline Models: Dolma, Pile, RedPajama, C4, mC4-en, Falcon-RefinedWeb\n\n\nUses\n----\n\n\nThis benchmark is intended for use in evaluating language model fit to fine-grained domains.### Direct Use\n\n\nThis dataset should be used for evaluating the likilihood of text from a given domain by a language model.### Out-of-Scope Use\n\n\nNote that the sources contained in this benchmark include varying licenses with differing restrictions (see License)\n\n\nDataset Structure\n-----------------\n\n\nThe sources in this dataset are each organized into their own subcorpus. This consists of a 'val' and 'test' split. Data within this is organized as files with lines separated JSON data where each line represents a document and its associated metadata. 
The type of metadata available varies from source to source, but each line contains at least a field ''text'' which contains the text of the document.\n\n\nDataset Creation\n----------------### Curation Rationale\n\n\nPerplexity is conventionally reported on held out data from a model's training distribution or a small number of traditional test sets. Such monolithic evaluation ignores potential variation of model fit across different domains that LMs implicitly learn to model. We curate sources of fine-grained textual domains in Paloma to enable evaluation of language model fit to specific domains of text. Paloma is inspired by and incorporates previous work that curates corpora with marked domains (The Pile, M2D2, C4 100 Domains, ICE, TwitterAAE). We conduct a stratified subsample over domains where we set a minimum subsample size based on emperical estimation of the variance over subsamples.### Source Data", "passage: #### Standard language modeling benchmarks\n\n\nThough it is common practice to evaluate on held out data from the pretraining corpus of a given model, we evaluate *across* several major pretraining corpora and standard language modeling benchmarks. We also break down performance per domain within the datasets that have multiple domains. Note that although the Paloma benchmark analysis in our paper describes results on the Pile, we are not able to re-host this data.\n\n\nSource: c4-en, Citation: Raffel et al (2019) via Dodge et al (2021), Description: Standard contemporary LM pretraining corpus automatically filtered from the April 2019 Common Crawl scrape\nSource: mc4-en, Citation: Xue et al (2021), Description: The English language portion of a pretraining corpus automatically filtered from 71 Common Crawl scrapes\nSource: Pile, Citation: Gao et al (2020), Description: Standard contemporary LM benchmark from curated multi-source data including large scale non-webscraped sources\nSource: Wikitext-103, Citation: Merity et al (2016), Description: A standard collection of verified “Good” and “Featured” articles on Wikipedia\nSource: Penn Tree Bank, Citation: Marcus et al (1999) via Nunes, Davide. (2020), Description: Classic Wall Street Journal benchmark with linguistic structure annotations omitted\nSource: RedPajama, Citation: Together Computer (2023), Description: A publicly available reproduction of the LLaMA (Touvron et al., 2023) pretraining source mixture, combining large amounts of webscraped text with smaller curated sources\nSource: Falcon-RefinedWeb, Citation: Penedo et al. (2023), Description: A corpus of English sampled from all Common Crawl scrapes until June 2023, more aggressively filtered and deduplicated than c4 and mc4-en\nSource: Dolma v1.5, Citation: Soldaini et al. (2023), Description: A three trillion token corpus that samples sources commonly used to train LMs in order to enable open research on pretraining data#### Fine-grained domain benchmarks\n\n\nWhere typical pretraining corpora offer at most tens of labeled domains usually based on where the data is sourced, we examine datasets with up to an order of magnitude more domains. Existing datasets (M2D2 and c4 100 Domains) and datasets we curate from Dolma v1.5 use metadata to define hundreds of domains over Wikipedia, Semantic Scholar, Common Crawl, Reddit, and Github data. 
These include diverse domains from *Culture and the arts: Performing arts*, a topic on Wikipedia, to *r/depression*, a forum on Reddit for mental health support.\n\n\nSource: M2D2 S2ORC, Citation: Reid et al (2022), Description: Papers from Semantic Scholar grouped by hierarchical academic field categories\nSource: M2D2 Wiki, Citation: Reid et al (2022), Description: Wikipedia articles grouped by hierarchical categories in the Wikipedia ontology\nSource: c4 100 Domains, Citation: Chronopoulou et al (2021), Description: Balanced samples of the top 100 URL domains in C4\nSource: Dolma 100 Subreddits, Citation: Soldaini et al. (2023), Description: Balanced samples of the top 100 Subreddits from the Dolma Reddit subset\nSource: Dolma 100 Programming Languages, Citation: Kocetkov et al. (2022) via Soldaini et al. (2023), Description: Balanced samples of the top 100 programming languages from the Dolma Stack subset", "passage: #### Disparities between speech communities\n\n\nSome communities are known to be underserved by existing models. Following HELM, We measure disparities in performance on corpora of African American English and White aligned English from TwitterAAE, as well as nine corpora of English from different countries with the ICE dataset. Note that although the Paloma benchmark analysis in our paper describes results on ICE, we are not able to re-host this data.\n\n\nSource: ICE, Citation: Greenbaum and Nelson (1996) via Liang et al (2022), Description: English from around the world curated by local experts, with subsets for Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, and the USA\nSource: TwitterAAE, Citation: Blodgett et al. (2016) via Liang et al (2022), Description: Balanced sets of tweets classified as African American or White aligned English#### Fringe sources previously studied for problematic discourse\n\n\nText from some fringe online communities has been shown to contain larger proportions of hate speech and toxicity than more mainstream sources. Longpre et al. (2023) have shown that varying amount of toxic content in pretraining data exhibits a tradeoff between non-toxic generation and ability to classify toxicity, indicating that model fit to discourse with toxicity is worth measuring. Measuring perplexity on Manosphere, Gab, and 4chan characterises model familiarity with distinct social contexts in which toxic language arises.\n\n\nSource: Manosphere Corpus, Citation: Ribeiro et al (2020), Description: 9 forums where a set of related masculinist ideologies developed over the 2000s and 2010s\nSource: Gab Corpus, Citation: Zannettou et al (2018), Description: Data from 2016-18 from an alt-right, free-speech-oriented social media platform shown to contain more hate speech than mainstream platforms\nSource: 4chan Corpus, Citation: Papasavva et al (2020), Description: Data from 2016-19 from a politics subforum of an anonymity-focused forum found to contain among the highest rates of toxic content#### Data Collection and Processing\n\n\nThe data in Paloma are sampled from existing sources. Most often perplexity evaluation data is subsampled uniformly over the original distribution of domains in a source, resulting in more or less tokens from each domain in the evaluation data based on how well represented they are in the corpus. We instead employ stratified sampling, in which all sources with marked domains are partitioned by domain and a uniform sample of the same size is taken from each partition. 
Specifically, documents are sampled from each domain until a target number of tokens is reached. This helps ensure that no domains are lost or very small after subsampling.\n\n\nIn social media domains with additional metadata that is typically displayed along with posts, we format metadata such as timestamps into the document ''text'' field. Where information is available about how threads of posts are connected, documents in that domain contain all posts in a given thread.\n\n\nAdditional details on source specific processing are available in our paper.", "passage: #### Who are the source data producers?\n\n\nText data from each of the sources curated in Paloma is created by varying sets of original authors. Some sources are collected from users of specific internet fora such as specific subreddits. Other data is collected on the basis of expert or automated classification of demographic groups. Other data is collected from authors of archival material including scientific preprints, Wikipedia, and code repositories. Lastly, data sampled from standard pretraining corpora comes from authors collected through automatic webscrapping and large scale sampling of archival sources, making it difficult to recover much specific information about these authors.#### Annotation process\n\n\nNo annotation is done on this data.#### Who are the annotators?\n\n\nNo annotation is done on this data." ]
58ca69c580dd550e42026452a57e53db571b84da
# Mosaic format for filtered combined dataset to finetune TinyLlama models

This repository stores dataset shards in Mosaic (MDS) format.

1. Built with https://github.com/malaysia-ai/dedup-text-dataset/blob/main/tinyllama/combine-dataset.ipynb
2. Tokenized using https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-955k-token-2T
3. 4096 context length.

## how-to

1. git clone,

```bash
git lfs clone https://huggingface.co/datasets/malaysia-ai/mosaic-tinyllama
```

2. load it,

```python
from streaming import LocalDataset
import numpy as np
from streaming.base.format.mds.encodings import Encoding, _encodings

class UInt16(Encoding):
    """Custom MDS encoding: each sample is stored as raw uint16 bytes."""

    def encode(self, obj) -> bytes:
        return obj.tobytes()

    def decode(self, data: bytes):
        return np.frombuffer(data, np.uint16)

# Register the codec so `streaming` can decode the 'uint16' columns in the shards.
_encodings['uint16'] = UInt16

# Point at the cloned directory and check the number of samples.
dataset = LocalDataset('mosaic-tinyllama')
len(dataset)
```
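Once loaded, each sample is a dict whose uint16 columns decode to numpy arrays of token ids. A short follow-up sketch for inspecting one sample and decoding it back to text with the TinyLlama tokenizer; note that the `input_ids` column name is an assumption rather than a documented field of this dataset, so check `sample.keys()` first.

```python
# Sketch: decode one packed sample back to text with the TinyLlama tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    'TinyLlama/TinyLlama-1.1B-intermediate-step-955k-token-2T'
)

sample = dataset[0]
print(sample.keys())  # confirm the actual column name(s) stored in the shards

token_ids = sample['input_ids']  # hypothetical column name -- adjust as needed
print(len(token_ids))            # expected to be up to the 4096 context length
print(tokenizer.decode(token_ids[:50]))
```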
malaysia-ai/mosaic-tinyllama
[ "language:ms", "region:us" ]
2023-11-29T00:52:51+00:00
{"language": ["ms"]}
2023-11-29T06:27:54+00:00
[]
[ "ms" ]
TAGS #language-Malay (macrolanguage) #region-us
# Mosaic format for filtered combine dataset to finetune TinyLlama models This repository is to store dataset shards using mosaic format. 1. URL 2. using tokenizer URL 3. 4096 context length. ## how-to 1. git clone, 2. load it,
[ "# Mosaic format for filtered combine dataset to finetune TinyLlama models\n\nThis repository is to store dataset shards using mosaic format.\n\n1. URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ "TAGS\n#language-Malay (macrolanguage) #region-us \n", "# Mosaic format for filtered combine dataset to finetune TinyLlama models\n\nThis repository is to store dataset shards using mosaic format.\n\n1. URL\n2. using tokenizer URL\n3. 4096 context length.", "## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
[ 16, 48, 13 ]
[ "passage: TAGS\n#language-Malay (macrolanguage) #region-us \n# Mosaic format for filtered combine dataset to finetune TinyLlama models\n\nThis repository is to store dataset shards using mosaic format.\n\n1. URL\n2. using tokenizer URL\n3. 4096 context length.## how-to\n\n1. git clone,\n\n\n\n2. load it," ]
2e4080bf31f4ab4fa360e1c3ea0d5aaad5a4b9c9
Original Datasets - Open-Orca/OpenOrca - kyujinpy/OpenOrca-KO
jhflow/orca_ko_en_pair
[ "region:us" ]
2023-11-29T01:41:24+00:00
{}
2023-12-04T02:31:53+00:00
[]
[]
TAGS #region-us
Original Datasets - Open-Orca/OpenOrca - kyujinpy/OpenOrca-KO
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
034d1935a4e3268ab3cefd02f46e4c1faa380282
### Dataset Description #### Title **Legal Texts and Summaries Dataset** #### Description This dataset is a collection of legal documents and their associated summaries, subjects (materia), and keywords (voces). It is primarily focused on the field of labor law, with particular emphasis on legal proceedings, labor rights, and workers' compensation laws in Argentina. #### Structure Each entry in the dataset contains the following fields: - `sumario`: A unique identifier for the legal document. - `materia`: The subject of the legal document, in this case, "DERECHO DEL TRABAJO" (Labor Law). - `voces`: Keywords or phrases summarizing the main topics of the document, such as "FALLO PLENARIO", "DERECHO LABORAL", "LEY SOBRE RIESGOS DEL TRABAJO", etc. - `sentencia`: The text of the legal document, which includes references to laws, legal precedents, and detailed analysis. The text was summarized using Claude v2 LLM. - 'texto': A legal summary. #### Applications This dataset is valuable for legal research, especially in the domain of labor law. It can be used for training models in legal text summarization, keyword extraction, and legal document classification. Additionally, it's useful for academic research in legal studies, especially regarding labor law and workers' compensation in Argentina. #### Format The dataset is provided in JSON format, ensuring easy integration with most data processing and machine learning tools. #### Language The content is predominantly in Spanish, reflecting its focus on Argentine law. #### Source and Authenticity The data is compiled from official legal documents and summaries from Argentina. It's important for users to verify the authenticity and current relevance of the legal texts as they might have undergone revisions or may not reflect the latest legal standings.
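For orientation, a minimal loading sketch, assuming the Hugging Face `datasets` library. The repo id and split names follow this card's metadata (`train_set.json` and `test_set.json` map to `train` and `test` splits), and the field names follow the schema above.

```python
# Sketch: load the splits and inspect one record's fields.
from datasets import load_dataset

ds = load_dataset("harpomaxx/jurisgpt")  # splits: "train" and "test"

example = ds["train"][0]
for field in ("sumario", "materia", "voces", "sentencia", "texto"):
    value = str(example.get(field, ""))
    print(f"{field}: {value[:80]}")
```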
harpomaxx/jurisgpt
[ "license:openrail", "region:us" ]
2023-11-29T02:03:28+00:00
{"license": "openrail", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "train_set.json"}, {"split": "test", "path": "test_set.json"}]}]}
2023-11-29T14:24:29+00:00
[]
[]
TAGS #license-openrail #region-us
### Dataset Description #### Title Legal Texts and Summaries Dataset #### Description This dataset is a collection of legal documents and their associated summaries, subjects (materia), and keywords (voces). It is primarily focused on the field of labor law, with particular emphasis on legal proceedings, labor rights, and workers' compensation laws in Argentina. #### Structure Each entry in the dataset contains the following fields: - 'sumario': A unique identifier for the legal document. - 'materia': The subject of the legal document, in this case, "DERECHO DEL TRABAJO" (Labor Law). - 'voces': Keywords or phrases summarizing the main topics of the document, such as "FALLO PLENARIO", "DERECHO LABORAL", "LEY SOBRE RIESGOS DEL TRABAJO", etc. - 'sentencia': The text of the legal document, which includes references to laws, legal precedents, and detailed analysis. The text was summarized using Claude v2 LLM. - 'texto': A legal summary. #### Applications This dataset is valuable for legal research, especially in the domain of labor law. It can be used for training models in legal text summarization, keyword extraction, and legal document classification. Additionally, it's useful for academic research in legal studies, especially regarding labor law and workers' compensation in Argentina. #### Format The dataset is provided in JSON format, ensuring easy integration with most data processing and machine learning tools. #### Language The content is predominantly in Spanish, reflecting its focus on Argentine law. #### Source and Authenticity The data is compiled from official legal documents and summaries from Argentina. It's important for users to verify the authenticity and current relevance of the legal texts as they might have undergone revisions or may not reflect the latest legal standings.
[ "### Dataset Description", "#### Title\nLegal Texts and Summaries Dataset", "#### Description\nThis dataset is a collection of legal documents and their associated summaries, subjects (materia), and keywords (voces). It is primarily focused on the field of labor law, with particular emphasis on legal proceedings, labor rights, and workers' compensation laws in Argentina.", "#### Structure\nEach entry in the dataset contains the following fields:\n- 'sumario': A unique identifier for the legal document.\n- 'materia': The subject of the legal document, in this case, \"DERECHO DEL TRABAJO\" (Labor Law).\n- 'voces': Keywords or phrases summarizing the main topics of the document, such as \"FALLO PLENARIO\", \"DERECHO LABORAL\", \"LEY SOBRE RIESGOS DEL TRABAJO\", etc.\n- 'sentencia': The text of the legal document, which includes references to laws, legal precedents, and detailed analysis. The text was summarized using Claude v2 LLM.\n- 'texto': A legal summary.", "#### Applications\nThis dataset is valuable for legal research, especially in the domain of labor law. It can be used for training models in legal text summarization, keyword extraction, and legal document classification. Additionally, it's useful for academic research in legal studies, especially regarding labor law and workers' compensation in Argentina.", "#### Format\nThe dataset is provided in JSON format, ensuring easy integration with most data processing and machine learning tools.", "#### Language\nThe content is predominantly in Spanish, reflecting its focus on Argentine law.", "#### Source and Authenticity\nThe data is compiled from official legal documents and summaries from Argentina. It's important for users to verify the authenticity and current relevance of the legal texts as they might have undergone revisions or may not reflect the latest legal standings." ]
[ "TAGS\n#license-openrail #region-us \n", "### Dataset Description", "#### Title\nLegal Texts and Summaries Dataset", "#### Description\nThis dataset is a collection of legal documents and their associated summaries, subjects (materia), and keywords (voces). It is primarily focused on the field of labor law, with particular emphasis on legal proceedings, labor rights, and workers' compensation laws in Argentina.", "#### Structure\nEach entry in the dataset contains the following fields:\n- 'sumario': A unique identifier for the legal document.\n- 'materia': The subject of the legal document, in this case, \"DERECHO DEL TRABAJO\" (Labor Law).\n- 'voces': Keywords or phrases summarizing the main topics of the document, such as \"FALLO PLENARIO\", \"DERECHO LABORAL\", \"LEY SOBRE RIESGOS DEL TRABAJO\", etc.\n- 'sentencia': The text of the legal document, which includes references to laws, legal precedents, and detailed analysis. The text was summarized using Claude v2 LLM.\n- 'texto': A legal summary.", "#### Applications\nThis dataset is valuable for legal research, especially in the domain of labor law. It can be used for training models in legal text summarization, keyword extraction, and legal document classification. Additionally, it's useful for academic research in legal studies, especially regarding labor law and workers' compensation in Argentina.", "#### Format\nThe dataset is provided in JSON format, ensuring easy integration with most data processing and machine learning tools.", "#### Language\nThe content is predominantly in Spanish, reflecting its focus on Argentine law.", "#### Source and Authenticity\nThe data is compiled from official legal documents and summaries from Argentina. It's important for users to verify the authenticity and current relevance of the legal texts as they might have undergone revisions or may not reflect the latest legal standings." ]
[ 12, 5, 11, 63, 167, 73, 28, 21, 63 ]
[ "passage: TAGS\n#license-openrail #region-us \n### Dataset Description#### Title\nLegal Texts and Summaries Dataset#### Description\nThis dataset is a collection of legal documents and their associated summaries, subjects (materia), and keywords (voces). It is primarily focused on the field of labor law, with particular emphasis on legal proceedings, labor rights, and workers' compensation laws in Argentina.#### Structure\nEach entry in the dataset contains the following fields:\n- 'sumario': A unique identifier for the legal document.\n- 'materia': The subject of the legal document, in this case, \"DERECHO DEL TRABAJO\" (Labor Law).\n- 'voces': Keywords or phrases summarizing the main topics of the document, such as \"FALLO PLENARIO\", \"DERECHO LABORAL\", \"LEY SOBRE RIESGOS DEL TRABAJO\", etc.\n- 'sentencia': The text of the legal document, which includes references to laws, legal precedents, and detailed analysis. The text was summarized using Claude v2 LLM.\n- 'texto': A legal summary.#### Applications\nThis dataset is valuable for legal research, especially in the domain of labor law. It can be used for training models in legal text summarization, keyword extraction, and legal document classification. Additionally, it's useful for academic research in legal studies, especially regarding labor law and workers' compensation in Argentina.#### Format\nThe dataset is provided in JSON format, ensuring easy integration with most data processing and machine learning tools.#### Language\nThe content is predominantly in Spanish, reflecting its focus on Argentine law.#### Source and Authenticity\nThe data is compiled from official legal documents and summaries from Argentina. It's important for users to verify the authenticity and current relevance of the legal texts as they might have undergone revisions or may not reflect the latest legal standings." ]
675a205c3527a5dbc59adb59139c7ec94e9a704c
# Bangumi Image Base of Seirei No Moribito

This is the image base of bangumi Seirei no Moribito. We detected 26 characters and 2981 images in total. The full dataset is [here](all.zip).

**Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability).

Here is the characters' preview:

| # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 |
|:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|
| 0 | 593 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) |
| 1 | 73 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) |
| 2 | 487 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) |
| 3 | 450 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) |
| 4 | 327 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) |
| 5 | 79 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) |
| 6 | 33 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) |
| 7 | 126 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) |
| 8 | 81 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) |
| 9 | 46 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) |
| 10 | 43 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) |
| 11 | 73 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) |
| 12 | 16 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) |
| 13 | 172 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) |
| 14 | 96 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) |
| 15 | 18 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) |
| 16 | 62 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) |
| 17 | 18 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) |
| 18 | 53 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) |
| 19 | 24 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) |
| 20 | 28 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) |
| 21 | 11 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) |
| 22 | 14 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) |
| 23 | 12 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) |
| 24 | 12 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) |
| noise | 34 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
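As a rough illustration of how the per-character archives above can be retrieved programmatically, the sketch below uses `huggingface_hub` to download one character's `dataset.zip` from this repository and unpack it locally. The character index (`0`) and the output directory name are arbitrary example choices, not part of the dataset.

```python
import zipfile
from huggingface_hub import hf_hub_download

# Fetch the archive for character 0 from this dataset repository.
zip_path = hf_hub_download(
    repo_id="BangumiBase/seireinomoribito",
    filename="0/dataset.zip",
    repo_type="dataset",
)

# Unpack the character's images into a local folder for inspection or training.
with zipfile.ZipFile(zip_path) as zf:
    zf.extractall("seirei_no_moribito_char0")
```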
BangumiBase/seireinomoribito
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-29T02:18:40+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T04:06:55+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Seirei No Moribito ======================================== This is the image base of bangumi Seirei no Moribito. We detected 26 characters and 2981 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
6517e60c771b3054cbe1cdd19e1a1678c24ee04e
# Bangumi Image Base of Jigoku Shoujo - Yoi No Togi

This is the image base of bangumi Jigoku Shoujo - Yoi no Togi. We detected 53 characters and 2595 images in total. The full dataset is [here](all.zip).

**Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability).

Here is the characters' preview:

| # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 |
|:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|
| 0 | 232 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) |
| 1 | 25 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) |
| 2 | 55 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) |
| 3 | 30 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) |
| 4 | 11 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) |
| 5 | 11 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) |
| 6 | 28 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) |
| 7 | 44 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) |
| 8 | 46 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) |
| 9 | 26 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) |
| 10 | 39 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) |
| 11 | 212 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) |
| 12 | 68 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) |
| 13 | 132 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) |
| 14 | 51 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) |
| 15 | 46 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) |
| 16 | 132 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) |
| 17 | 119 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) |
| 18 | 60 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) |
| 19 | 17 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) |
| 20 | 42 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) |
| 21 | 35 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) |
| 22 | 58 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) |
| 23 | 17 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) |
| 24 | 8 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) |
| 25 | 62 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) |
| 26 | 35 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) |
| 27 | 36 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) |
| 28 | 60 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) |
| 29 | 48 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) |
| 30 | 61 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) |
| 31 | 105 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) |
| 32 | 21 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) |
| 33 | 18 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) |
| 34 | 23 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) |
| 35 | 86 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) |
| 36 | 14 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) |
| 37 | 14 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) |
| 38 | 68 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) |
| 39 | 16 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) |
| 40 | 45 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) |
| 41 | 30 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) |
| 42 | 11 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) |
| 43 | 75 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) |
| 44 | 22 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) |
| 45 | 25 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) |
| 46 | 20 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) |
| 47 | 30 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) |
| 48 | 26 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) |
| 49 | 13 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) |
| 50 | 12 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) |
| 51 | 16 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) |
| noise | 59 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/jigokushoujoyoinotogi
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-29T02:50:13+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T04:33:53+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Jigoku Shoujo - Yoi No Togi ================================================= This is the image base of bangumi Jigoku Shoujo - Yoi no Togi. We detected 53 characters and 2595 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
d892b0fc6184bdd711f44088cf787333f3eaffd1
# Bangumi Image Base of Jigoku Shoujo

This is the image base of bangumi Jigoku Shoujo. We detected 89 characters and 6611 images in total. The full dataset is [here](all.zip).

**Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability).

Here is the characters' preview:

| # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 |
|:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|
| 0 | 538 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) |
| 1 | 26 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) |
| 2 | 26 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) |
| 3 | 102 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) |
| 4 | 52 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) |
| 5 | 434 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) |
| 6 | 637 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) |
| 7 | 703 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) |
| 8 | 102 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) |
| 9 | 56 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) |
| 10 | 50 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) |
| 11 | 73 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) |
| 12 | 35 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) |
| 13 | 109 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) |
| 14 | 211 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) |
| 15 | 127 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) |
| 16 | 25 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) |
| 17 | 81 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) |
| 18 | 18 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) |
| 19 | 31 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) |
| 20 | 51 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) |
| 21 | 109 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) |
| 22 | 46 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) |
| 23 | 42 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) |
| 24 | 78 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) |
| 25 | 31 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) |
| 26 | 136 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) |
| 27 | 48 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) |
| 28 | 30 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) |
| 29 | 49 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) |
| 30 | 27 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) |
| 31 | 29 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) |
| 32 | 44 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) |
| 33 | 428 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) |
| 34 | 41 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) |
| 35 | 55 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) |
| 36 | 40 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) |
| 37 | 23 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) |
| 38 | 271 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) |
| 39 | 41 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) |
| 40 | 42 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) |
| 41 | 39 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) |
| 42 | 12 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) |
| 43 | 23 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) |
| 44 | 26 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) |
| 45 | 68 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) |
| 46 | 47 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) |
| 47 | 53 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) |
| 48 | 24 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) |
| 49 | 26 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) |
| 50 | 185 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) |
| 51 | 46 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) |
| 52 | 32 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) |
| 53 | 27 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) |
| 54 | 50 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) |
| 55 | 40 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) |
| 56 | 12 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) |
| 57 | 25 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) |
| 58 | 35 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) |
| 59 | 17 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) |
| 60 | 66 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) |
| 61 | 41 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) |
| 62 | 28 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) |
| 63 | 21 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) |
| 64 | 17 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) |
| 65 | 16 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) |
| 66 | 54 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 8](66/preview_8.png) |
| 67 | 22 | [Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) |
| 68 | 17 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) |
| 69 | 9 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) |
| 70 | 21 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) |
| 71 | 30 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | ![preview 7](71/preview_7.png) | ![preview 8](71/preview_8.png) |
| 72 | 15 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | ![preview 7](72/preview_7.png) | ![preview 8](72/preview_8.png) |
| 73 | 21 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) |
| 74 | 19 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | ![preview 7](74/preview_7.png) | ![preview 8](74/preview_8.png) |
| 75 | 13 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | ![preview 7](75/preview_7.png) | ![preview 8](75/preview_8.png) |
| 76 | 17 | [Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | ![preview 7](76/preview_7.png) | ![preview 8](76/preview_8.png) |
| 77 | 13 | [Download](77/dataset.zip) | ![preview 1](77/preview_1.png) | ![preview 2](77/preview_2.png) | ![preview 3](77/preview_3.png) | ![preview 4](77/preview_4.png) | ![preview 5](77/preview_5.png) | ![preview 6](77/preview_6.png) | ![preview 7](77/preview_7.png) | ![preview 8](77/preview_8.png) |
| 78 | 101 | [Download](78/dataset.zip) | ![preview 1](78/preview_1.png) | ![preview 2](78/preview_2.png) | ![preview 3](78/preview_3.png) | ![preview 4](78/preview_4.png) | ![preview 5](78/preview_5.png) | ![preview 6](78/preview_6.png) | ![preview 7](78/preview_7.png) | ![preview 8](78/preview_8.png) |
| 79 | 17 | [Download](79/dataset.zip) | ![preview 1](79/preview_1.png) | ![preview 2](79/preview_2.png) | ![preview 3](79/preview_3.png) | ![preview 4](79/preview_4.png) | ![preview 5](79/preview_5.png) | ![preview 6](79/preview_6.png) | ![preview 7](79/preview_7.png) | ![preview 8](79/preview_8.png) |
| 80 | 23 | [Download](80/dataset.zip) | ![preview 1](80/preview_1.png) | ![preview 2](80/preview_2.png) | ![preview 3](80/preview_3.png) | ![preview 4](80/preview_4.png) | ![preview 5](80/preview_5.png) | ![preview 6](80/preview_6.png) | ![preview 7](80/preview_7.png) | ![preview 8](80/preview_8.png) |
| 81 | 23 | [Download](81/dataset.zip) | ![preview 1](81/preview_1.png) | ![preview 2](81/preview_2.png) | ![preview 3](81/preview_3.png) | ![preview 4](81/preview_4.png) | ![preview 5](81/preview_5.png) | ![preview 6](81/preview_6.png) | ![preview 7](81/preview_7.png) | ![preview 8](81/preview_8.png) |
| 82 | 7 | [Download](82/dataset.zip) | ![preview 1](82/preview_1.png) | ![preview 2](82/preview_2.png) | ![preview 3](82/preview_3.png) | ![preview 4](82/preview_4.png) | ![preview 5](82/preview_5.png) | ![preview 6](82/preview_6.png) | ![preview 7](82/preview_7.png) | N/A |
| 83 | 7 | [Download](83/dataset.zip) | ![preview 1](83/preview_1.png) | ![preview 2](83/preview_2.png) | ![preview 3](83/preview_3.png) | ![preview 4](83/preview_4.png) | ![preview 5](83/preview_5.png) | ![preview 6](83/preview_6.png) | ![preview 7](83/preview_7.png) | N/A |
| 84 | 29 | [Download](84/dataset.zip) | ![preview 1](84/preview_1.png) | ![preview 2](84/preview_2.png) | ![preview 3](84/preview_3.png) | ![preview 4](84/preview_4.png) | ![preview 5](84/preview_5.png) | ![preview 6](84/preview_6.png) | ![preview 7](84/preview_7.png) | ![preview 8](84/preview_8.png) |
| 85 | 14 | [Download](85/dataset.zip) | ![preview 1](85/preview_1.png) | ![preview 2](85/preview_2.png) | ![preview 3](85/preview_3.png) | ![preview 4](85/preview_4.png) | ![preview 5](85/preview_5.png) | ![preview 6](85/preview_6.png) | ![preview 7](85/preview_7.png) | ![preview 8](85/preview_8.png) |
| 86 | 5 | [Download](86/dataset.zip) | ![preview 1](86/preview_1.png) | ![preview 2](86/preview_2.png) | ![preview 3](86/preview_3.png) | ![preview 4](86/preview_4.png) | ![preview 5](86/preview_5.png) | N/A | N/A | N/A |
| 87 | 7 | [Download](87/dataset.zip) | ![preview 1](87/preview_1.png) | ![preview 2](87/preview_2.png) | ![preview 3](87/preview_3.png) | ![preview 4](87/preview_4.png) | ![preview 5](87/preview_5.png) | ![preview 6](87/preview_6.png) | ![preview 7](87/preview_7.png) | N/A |
| noise | 54 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/jigokushoujo
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-29T02:55:17+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T05:08:17+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Jigoku Shoujo =================================== This is the image base of bangumi Jigoku Shoujo; we detected 89 characters and 6611 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
94f8f27169236642b3242f405dfb784690f25d56
# Dataset Card for "repo_name" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nikalal/repo_name
[ "region:us" ]
2023-11-29T03:24:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1441016.0, "num_examples": 90}, {"name": "validation", "num_bytes": 147366.0, "num_examples": 11}], "download_size": 0, "dataset_size": 1588382.0}}
2023-12-01T06:29:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "repo_name" More Information needed
[ "# Dataset Card for \"repo_name\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"repo_name\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"repo_name\"\n\nMore Information needed" ]
a1ec3c7be11b11ca96850afef741d77893d0ff05
# Dataset Card for "SurfaceRoughness"

### Dataset Summary

A collection of data representing surface roughness categories B, C, and D according to ASCE 7-16 Section 26.7.2.

### Data Structure

An example looks like this:

```python
{'image': <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1041x639>,
 'label': 0}
```

### Data Split

|               | train | validation | test |
|---------------|------:|-----------:|-----:|
| # of examples |    66 |         15 |    9 |
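If it helps, here is a minimal loading sketch (assuming the standard `datasets` loader resolves this repository id; the label names follow the feature definitions in this card's metadata):

```python
from datasets import load_dataset

# Load the train split and inspect one example.
ds = load_dataset("sh-zheng/SurfaceRoughness", split="train")

example = ds[0]
print(example["image"])   # a PIL image, e.g. mode RGBA, size 1041x639
print(example["label"])   # 0 = RoughnessB, 1 = RoughnessC, 2 = RoughnessD
```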
sh-zheng/SurfaceRoughness
[ "region:us" ]
2023-11-29T03:31:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "RoughnessB", "1": "RoughnessC", "2": "RoughnessD"}}}}], "splits": [{"name": "train", "num_bytes": 49679719.0, "num_examples": 66}, {"name": "validation", "num_bytes": 17272712.0, "num_examples": 9}, {"name": "test", "num_bytes": 24382239.0, "num_examples": 15}], "download_size": 91342507, "dataset_size": 91334670.0}}
2023-11-29T04:06:42+00:00
[]
[]
TAGS #region-us
Dataset Card for "SurfaceRoughness" =================================== ### Dataset Summary A collection of data representing surface roughness categories of B, C, and D according to ASCE 7-16 26.7.2 ### Data Structure An example looks like this: ### Data Split
[ "### Dataset Summary\n\n\nA collection of data representing surface roughness categories of B, C, and D according to ASCE 7-16 26.7.2", "### Data Structure\n\n\nAn example looks like below:", "### Data Split" ]
[ "TAGS\n#region-us \n", "### Dataset Summary\n\n\nA collection of data representing surface roughness categories of B, C, and D according to ASCE 7-16 26.7.2", "### Data Structure\n\n\nAn example looks like below:", "### Data Split" ]
[ 6, 32, 12, 4 ]
[ "passage: TAGS\n#region-us \n### Dataset Summary\n\n\nA collection of data representing surface roughness categories of B, C, and D according to ASCE 7-16 26.7.2### Data Structure\n\n\nAn example looks like below:### Data Split" ]
fe725b939224152a19b580e99b16f90ca962e361
# Bangumi Image Base of Jigokuraku This is the image base of bangumi Jigokuraku, we detected 36 characters, 2877 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 187 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 102 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 109 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 23 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 41 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 26 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 125 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 392 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 67 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 18 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 75 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 27 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 22 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 105 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 43 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 32 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 48 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 113 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 83 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 104 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 12 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 
4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 28 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 25 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 32 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 216 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 10 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 19 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 9 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 469 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 27 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 42 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 39 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 14 | [Download](32/dataset.zip) | 
![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 64 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 11 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | noise | 118 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/jigokuraku
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-29T03:45:45+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T05:56:41+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Jigokuraku ================================ This is the image base of bangumi Jigokuraku; we detected 36 characters and 2877 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually be noisy. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
cc500f6a14191fb98b64df6c2065acdbe38c372c
Portuguese and English Translations of readme are available here. ---- UNCLEANED VERSION - XLS format, which could be combined and cleaned Combined and partially cleaned, partially reformatted, partial ETL version is available at Solshine/Portuguese-English-Vocab-PartiallyTransformed *English Readme:* **Bridging Language Barriers and Empowering Marginalized Communities through Multilingual Parallel Corpora: The English-Portuguese MPC** In the realm of natural language processing (NLP), the availability of high-quality language data is crucial for developing effective NLP models. However, marginalized indigenous communities, particularly those in Brazil and the Amazon Rainforest, often lack access to NLP tools and resources, hindering their ability to fully engage in the digital age. To address this disparity, we propose the creation of an English-Portuguese Multilingual Parallel Corpus (MPC), a collection of carefully curated parallel text pairs in English and Portuguese. This resource is specifically designed to democratize access to NLP and promote knowledge exchange between these two languages, fostering cross-cultural communication and empowering marginalized communities. **Democratizing Access and Fostering Cross-Pollination** This English-Portuguese MPC, Solshine/Portuguese-English-Translation-and-NLP-trainingdata-UNCLEANED , serves as a bridge between these two languages, enabling the development of more robust machine translation systems and enriching the availability of Portuguese language resources. For marginalized indigenous communities, this resource holds the potential to: * **Preserve and revitalize indigenous languages:** By providing a benchmark for Portuguese-English translation, the MPC can facilitate the development of tools that can translate indigenous languages into Portuguese, aiding in language preservation and revitalization efforts. * **Empower indigenous communities in the digital sphere:** Access to NLP tools trained on the MPC can enable indigenous communities to engage with online resources, participate in social media, and access information in their native language or Portuguese. * **Promote cross-cultural understanding:** By fostering better communication between indigenous communities and the wider world, the MPC can help bridge cultural divides and promote mutual understanding. **Addressing the Needs of Marginalized Indigenous Communities** The English-Portuguese MPC is particularly relevant to marginalized indigenous communities in Brazil and the Amazon Rainforest, where Portuguese serves as the lingua franca and numerous indigenous languages are spoken. By providing a comprehensive resource for Portuguese-English translation, the MPC can empower these communities to: * **Participate in education and research:** Indigenous students and researchers can access academic materials and engage in scientific research using NLP tools trained on the MPC. * **Promote cultural heritage and storytelling:** Indigenous communities can utilize the MPC to translate their cultural narratives, folklore, and traditional knowledge into Portuguese, preserving and sharing their heritage with a wider audience. * **Advocate for their rights and interests:** Indigenous communities can use the MPC to communicate effectively with government officials, NGOs, and the international community, advocating for their rights and interests. 
**Sourcing the MPC through Conversation with Bard** The English-Portuguese MPC was sourced through a combination of conversation with Bard and other sources. Bard's ability to understand and generate human language made it an invaluable tool for generating and refining the dataset, ensuring that the translations were accurate and natural-sounding. Once the data was collected, it was carefully preprocessed and aligned to ensure that the sentences in English and Portuguese were truly parallel. This was a critical step, as it ensured that the MPC could be used to train NLP models that could accurately translate between the two languages. **Conclusion** The English-Portuguese MPC here is a valuable resource for NLP researchers and developers. It can be used to train machine translation systems, develop text summarization tools, and create sentiment analysis models. The MPC can also be used to study the relationship between English and Portuguese, and to develop new NLP algorithms. Moreover, the MPC empowers marginalized communities to preserve their languages, engage in the digital sphere, and advocate for their rights and interests. ---- *Portuguese Readme:* **Superando barreiras linguísticas e empoderando comunidades marginalizadas por meio de corpora paralelos multilíngues: O MPC inglês-português** No campo do processamento de linguagem natural (PLN), a disponibilidade de dados linguísticos de alta qualidade é crucial para o desenvolvimento de modelos de PLN eficazes. No entanto, comunidades indígenas marginalizadas, particularmente aquelas no Brasil e na Amazônia, muitas vezes não têm acesso a ferramentas e recursos de PLN, dificultando sua capacidade de se envolver plenamente na era digital. Para lidar com essa disparidade, propomos a criação de um Corpus Paralelo Multilíngue (MPC) inglês-português, uma coleção de pares de texto paralelos cuidadosamente selecionados em inglês e português. Este recurso é projetado especificamente para democratizar o acesso ao PLN e promover o intercâmbio de conhecimento entre esses dois idiomas, fomentando a comunicação intercultural e empoderando comunidades marginalizadas. **Democratizando o acesso e fomentando a polinização cruzada** O MPC inglês-português, Solshine/Portuguese-English-Translation-and-NLP-trainingdata-UNCLEANED , serve como uma ponte entre esses dois idiomas, possibilitando o desenvolvimento de sistemas de tradução automática mais robustos e enriquecendo a disponibilidade de recursos em português. Para comunidades indígenas marginalizadas, este recurso tem o potencial de: * **Preservar e revitalizar línguas indígenas:** Ao fornecer um benchmark para a tradução português-inglês, o MPC pode facilitar o desenvolvimento de ferramentas que podem traduzir línguas indígenas para o português, auxiliando nos esforços de preservação e revitalização da língua. * **Empoderar comunidades indígenas na esfera digital:** O acesso a ferramentas de PLN treinadas no MPC pode capacitar comunidades indígenas a se envolver com recursos online, participar de mídias sociais e acessar informações em sua língua nativa ou português. * **Promover a compreensão intercultural:** Ao promover uma melhor comunicação entre as comunidades indígenas e o mundo em geral, o MPC pode ajudar a superar divisões culturais e promover a compreensão mútua. 
**Abordando as necessidades de comunidades indígenas marginalizadas** O MPC inglês-português é particularmente relevante para comunidades indígenas marginalizadas no Brasil e na Amazônia, onde o português serve como língua franca e inúmeras línguas indígenas são faladas. Ao fornecer um recurso abrangente para tradução português-inglês, o MPC pode capacitar essas comunidades a: * **Participar de educação e pesquisa:** Estudantes e pesquisadores indígenas podem acessar materiais acadêmicos e se envolver em pesquisas científicas usando ferramentas de PLN treinadas no MPC. * **Promover o patrimônio cultural e a narrativa:** Comunidades indígenas podem utilizar o MPC para traduzir suas narrativas culturais, folclore e conhecimento tradicional para o português, preservando e compartilhando seu patrimônio com um público mais amplo. * **Defender seus direitos e interesses:** As comunidades indígenas podem usar o MPC para se comunicar efetivamente com autoridades governamentais, ONGs e a comunidade internacional, defendendo seus direitos e interesses. **Obtendo o MPC por meio de conversas com Bard** O MPC inglês-português foi obtido por meio de uma combinação de conversas com Bard e outras fontes. A capacidade do Bard de entender e gerar linguagem humana o tornou uma ferramenta inestimável para gerar e refinar o conjunto de dados, garantindo que as traduções fossem precisas e naturais. Uma vez que os dados foram coletados, eles foram cuidadosamente pré-processados e alinhados para garantir que as frases em inglês e português fossem verdadeiramente paralelas. Esta foi uma etapa crítica, pois garantiu que o MPC pudesse ser usado para treinar modelos de PLN que pudessem traduzir com precisão entre os dois idiomas. **Conclusão** O MPC inglês-português é um recurso valioso para pesquisadores e desenvolvedores de PLN. Ele pode ser usado para treinar sistemas de tradução automática, desenvolver ferramentas de resumo de texto e criar modelos de análise de sentimento. O MPC também pode ser usado para estudar a relação entre inglês e português e desenvolver novos algoritmos de PLN. Além disso, o MPC capacita comunidades marginalizadas a preservar seus idiomas, se envolver na esfera digital e defender seus direitos e interesses. ---- **Notes about the data (currently being updated):** Inspired by the (November 2023) developments of Orca 2 and the ability of large LLMs to now produce training datasets for smaller (i.e., 7B or 3B) models to efficiently learn and distill the fundamentals of the knowledge into themselves. This is a great Portuguese language dataset, connecting Portuguese into the most widely used and trained language (English), thus democratizing access. You are encouraged to use this in your training to enrich the model's Portuguese. Most of the tables include: G3 Annotations, B- Tags, SRL Annotations, Dependency Parsing Annotations, POS Tagging Annotations. Please note for a large portion of the data: The NER annotations (G3) indicate general entities, while the B- tags indicate specific types of entities (e.g., B-Location, B-Time). The SRL annotations indicate the semantic roles of the constituents in the sentence (e.g., B-Theme, B-Agent, B-Patient, B-Goal). The dependency parsing annotations indicate the grammatical relationships between the words in the sentence. The POS tagging annotations indicate the part-of-speech (e.g., N for noun, V for verb, A for adjective) of each word in the sentence. 
The main source of the data is generated through structured questions to Google Bard in the final week of November 2023, and many of these generating queries can be found as title names of individual small tables. Much of that portion was structured explicitly by including the previous paragraph's data-structure explanation in the prompt instructions. This resulted in potentially much more useful data about the sentences or words from an NLP perspective, albeit with more inconsistency and minor errors, and even information occasionally entered into the wrong field, in those NLP-related fields mentioned above. This data is mostly uncleaned and should be used with the understanding that it was largely uncleaned and gathered from various sources. The data in the tables in this dataset has inherently been filtered by the guardrails present in Bard and through careful observation of the data (rejecting obviously erroneous generations) as it was being generated by Bard and uploaded to the minimally processed dataset. This is a disclaimer of any possible error or omission, and the dataset should be useful with this understanding. Some of the tables or observations are missing entries for some of those fields (with the exception of a Portuguese term or sentence, which is present in every observation), especially POS Tagging Annotations and Dependency Parsing Annotations, usually due to reaching Bard's data limit in its public browser version (as of November 28th, 2023). There is a strong focus on moral compass and ethical real-world problem solving, as well as Indigenous Knowledge Systems, Climate Change, Science, STEM, intellectual property from a Copyleft perspective, some simple aspects of law, Indigenous Archeology, Educational Philosophy, and basic Vocabulary.
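To make the annotation scheme above concrete, here is one hypothetical observation; the sentence, the field names, and all tags below are invented purely for illustration and are not drawn from the dataset:

```python
# Hypothetical annotated observation (illustrative only; tokens, field
# names, and tags are invented, not copied from the dataset tables).
observation = {
    "portuguese": ["Maria", "plantou", "milho", "em", "Manaus", "ontem", "."],
    "english": "Maria planted corn in Manaus yesterday.",
    "ner_g3": ["B-Person", "O", "O", "O", "B-Location", "B-Time", "O"],
    "srl": ["B-Agent", "O", "B-Theme", "O", "B-Location", "B-Time", "O"],
    "pos": ["N", "V", "N", "P", "N", "ADV", "PUNCT"],
}
```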
Solshine/Portuguese-English_MPC_for_NLP_UNCLEANED
[ "license:mit", "region:us" ]
2023-11-29T05:16:53+00:00
{"license": "mit"}
2023-12-03T08:16:50+00:00
[]
[]
TAGS #license-mit #region-us
Portuguese and English Translations of readme are available here. ---- UNCLEANED VERSION - XLS format, which could be combined and cleaned Combined and partially cleaned, partially reformatted, partial ETL version is available at Solshine/Portuguese-English-Vocab-PartiallyTransformed *English Readme:* Bridging Language Barriers and Empowering Marginalized Communities through Multilingual Parallel Corpora: The English-Portuguese MPC In the realm of natural language processing (NLP), the availability of high-quality language data is crucial for developing effective NLP models. However, marginalized indigenous communities, particularly those in Brazil and the Amazon Rainforest, often lack access to NLP tools and resources, hindering their ability to fully engage in the digital age. To address this disparity, we propose the creation of an English-Portuguese Multilingual Parallel Corpus (MPC), a collection of carefully curated parallel text pairs in English and Portuguese. This resource is specifically designed to democratize access to NLP and promote knowledge exchange between these two languages, fostering cross-cultural communication and empowering marginalized communities. Democratizing Access and Fostering Cross-Pollination This English-Portuguese MPC, Solshine/Portuguese-English-Translation-and-NLP-trainingdata-UNCLEANED , serves as a bridge between these two languages, enabling the development of more robust machine translation systems and enriching the availability of Portuguese language resources. For marginalized indigenous communities, this resource holds the potential to: * Preserve and revitalize indigenous languages: By providing a benchmark for Portuguese-English translation, the MPC can facilitate the development of tools that can translate indigenous languages into Portuguese, aiding in language preservation and revitalization efforts. * Empower indigenous communities in the digital sphere: Access to NLP tools trained on the MPC can enable indigenous communities to engage with online resources, participate in social media, and access information in their native language or Portuguese. * Promote cross-cultural understanding: By fostering better communication between indigenous communities and the wider world, the MPC can help bridge cultural divides and promote mutual understanding. Addressing the Needs of Marginalized Indigenous Communities The English-Portuguese MPC is particularly relevant to marginalized indigenous communities in Brazil and the Amazon Rainforest, where Portuguese serves as the lingua franca and numerous indigenous languages are spoken. By providing a comprehensive resource for Portuguese-English translation, the MPC can empower these communities to: * Participate in education and research: Indigenous students and researchers can access academic materials and engage in scientific research using NLP tools trained on the MPC. * Promote cultural heritage and storytelling: Indigenous communities can utilize the MPC to translate their cultural narratives, folklore, and traditional knowledge into Portuguese, preserving and sharing their heritage with a wider audience. * Advocate for their rights and interests: Indigenous communities can use the MPC to communicate effectively with government officials, NGOs, and the international community, advocating for their rights and interests. Sourcing the MPC through Conversation with Bard The English-Portuguese MPC was sourced through a combination of conversation with Bard and other sources. 
Bard's ability to understand and generate human language made it an invaluable tool for generating and refining the dataset, ensuring that the translations were accurate and natural-sounding. Once the data was collected, it was carefully preprocessed and aligned to ensure that the sentences in English and Portuguese were truly parallel. This was a critical step, as it ensured that the MPC could be used to train NLP models that could accurately translate between the two languages. Conclusion The English-Portuguese MPC here is a valuable resource for NLP researchers and developers. It can be used to train machine translation systems, develop text summarization tools, and create sentiment analysis models. The MPC can also be used to study the relationship between English and Portuguese, and to develop new NLP algorithms. Moreover, the MPC empowers marginalized communities to preserve their languages, engage in the digital sphere, and advocate for their rights and interests. ---- *Portuguese Readme:* Superando barreiras linguísticas e empoderando comunidades marginalizadas por meio de corpora paralelos multilíngues: O MPC inglês-português No campo do processamento de linguagem natural (PLN), a disponibilidade de dados linguísticos de alta qualidade é crucial para o desenvolvimento de modelos de PLN eficazes. No entanto, comunidades indígenas marginalizadas, particularmente aquelas no Brasil e na Amazônia, muitas vezes não têm acesso a ferramentas e recursos de PLN, dificultando sua capacidade de se envolver plenamente na era digital. Para lidar com essa disparidade, propomos a criação de um Corpus Paralelo Multilíngue (MPC) inglês-português, uma coleção de pares de texto paralelos cuidadosamente selecionados em inglês e português. Este recurso é projetado especificamente para democratizar o acesso ao PLN e promover o intercâmbio de conhecimento entre esses dois idiomas, fomentando a comunicação intercultural e empoderando comunidades marginalizadas. Democratizando o acesso e fomentando a polinização cruzada O MPC inglês-português, Solshine/Portuguese-English-Translation-and-NLP-trainingdata-UNCLEANED , serve como uma ponte entre esses dois idiomas, possibilitando o desenvolvimento de sistemas de tradução automática mais robustos e enriquecendo a disponibilidade de recursos em português. Para comunidades indígenas marginalizadas, este recurso tem o potencial de: * Preservar e revitalizar línguas indígenas: Ao fornecer um benchmark para a tradução português-inglês, o MPC pode facilitar o desenvolvimento de ferramentas que podem traduzir línguas indígenas para o português, auxiliando nos esforços de preservação e revitalização da língua. * Empoderar comunidades indígenas na esfera digital: O acesso a ferramentas de PLN treinadas no MPC pode capacitar comunidades indígenas a se envolver com recursos online, participar de mídias sociais e acessar informações em sua língua nativa ou português. * Promover a compreensão intercultural: Ao promover uma melhor comunicação entre as comunidades indígenas e o mundo em geral, o MPC pode ajudar a superar divisões culturais e promover a compreensão mútua. Abordando as necessidades de comunidades indígenas marginalizadas O MPC inglês-português é particularmente relevante para comunidades indígenas marginalizadas no Brasil e na Amazônia, onde o português serve como língua franca e inúmeras línguas indígenas são faladas. 
Ao fornecer um recurso abrangente para tradução português-inglês, o MPC pode capacitar essas comunidades a: * Participar de educação e pesquisa: Estudantes e pesquisadores indígenas podem acessar materiais acadêmicos e se envolver em pesquisas científicas usando ferramentas de PLN treinadas no MPC. * Promover o patrimônio cultural e a narrativa: Comunidades indígenas podem utilizar o MPC para traduzir suas narrativas culturais, folclore e conhecimento tradicional para o português, preservando e compartilhando seu patrimônio com um público mais amplo. * Defender seus direitos e interesses: As comunidades indígenas podem usar o MPC para se comunicar efetivamente com autoridades governamentais, ONGs e a comunidade internacional, defendendo seus direitos e interesses. Obtendo o MPC por meio de conversas com Bard O MPC inglês-português foi obtido por meio de uma combinação de conversas com Bard e outras fontes. A capacidade do Bard de entender e gerar linguagem humana o tornou uma ferramenta inestimável para gerar e refinar o conjunto de dados, garantindo que as traduções fossem precisas e naturais. Uma vez que os dados foram coletados, eles foram cuidadosamente pré-processados e alinhados para garantir que as frases em inglês e português fossem verdadeiramente paralelas. Esta foi uma etapa crítica, pois garantiu que o MPC pudesse ser usado para treinar modelos de PLN que pudessem traduzir com precisão entre os dois idiomas. Conclusão O MPC inglês-português é um recurso valioso para pesquisadores e desenvolvedores de PLN. Ele pode ser usado para treinar sistemas de tradução automática, desenvolver ferramentas de resumo de texto e criar modelos de análise de sentimento. O MPC também pode ser usado para estudar a relação entre inglês e português e desenvolver novos algoritmos de PLN. Além disso, o MPC capacita comunidades marginalizadas a preservar seus idiomas, se envolver na esfera digital e defender seus direitos e interesses. ---- Notes about the data (currently being updated): Inspired by the (November 2023) developments of Orca 2 and the ability of large LLMs to now produce training datasets for smaller (i.e., 7B or 3B) models to efficiently learn and distill the fundamentals of the knowledge into themselves. This is a great Portuguese language dataset, connecting Portuguese into the most widely used and trained language (English), thus democratizing access. You are encouraged to use this in your training to enrich the model's Portuguese. Most of the tables include: G3 Annotations, B- Tags, SRL Annotations, Dependency Parsing Annotations, POS Tagging Annotations. Please note for a large portion of the data: The NER annotations (G3) indicate general entities, while the B- tags indicate specific types of entities (e.g., B-Location, B-Time). The SRL annotations indicate the semantic roles of the constituents in the sentence (e.g., B-Theme, B-Agent, B-Patient, B-Goal). The dependency parsing annotations indicate the grammatical relationships between the words in the sentence. The POS tagging annotations indicate the part-of-speech (e.g., N for noun, V for verb, A for adjective) of each word in the sentence. The main source of the data is generated through structured questions to Google Bard in the final week of November 2023, and many of these generating queries can be found as title names of individual small tables. Much of that portion was structured explicitly by including the previous paragraph's data-structure explanation in the prompt instructions. 
This resulted in potentially much more useful data about the sentences or words from an NLP perspective, albeit with more inconsistency and minor errors, and even information occasionally entered into the wrong field, in those NLP-related fields mentioned above. This data is mostly uncleaned and should be used with the understanding that it was largely uncleaned and gathered from various sources. The data in the tables in this dataset has inherently been filtered by the guardrails present in Bard and through careful observation of the data (rejecting obviously erroneous generations) as it was being generated by Bard and uploaded to the minimally processed dataset. This is a disclaimer of any possible error or omission, and the dataset should be useful with this understanding. Some of the tables or observations are missing entries for some of those fields (with the exception of a Portuguese term or sentence, which is present in every observation), especially POS Tagging Annotations and Dependency Parsing Annotations, usually due to reaching Bard's data limit in its public browser version (as of November 28th, 2023). There is a strong focus on moral compass and ethical real-world problem solving, as well as Indigenous Knowledge Systems, Climate Change, Science, STEM, intellectual property from a Copyleft perspective, some simple aspects of law, Indigenous Archeology, Educational Philosophy, and basic Vocabulary.
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
1a8a3f88c959b057ddd44c8bcef6846f10df9104
# Dataset Card for "RedPajama-combined-15B-6K-llama" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
awettig/RedPajama-combined-15B-6K-llama
[ "region:us" ]
2023-11-29T05:23:32+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 1422094968, "num_examples": 17802}, {"name": "train", "num_bytes": 192480977304, "num_examples": 2409506}], "download_size": 577654462, "dataset_size": 193903072272}}
2023-11-29T16:45:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "RedPajama-combined-15B-6K-llama" More Information needed
[ "# Dataset Card for \"RedPajama-combined-15B-6K-llama\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"RedPajama-combined-15B-6K-llama\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"RedPajama-combined-15B-6K-llama\"\n\nMore Information needed" ]
4173bee130948ad01d2412694c0e6ff515c08aa2
---
Jession01/English-Cebuano-Translation
[ "task_categories:text-generation", "language:en", "license:apache-2.0", "region:us" ]
2023-11-29T05:24:24+00:00
{"language": ["en"], "license": "apache-2.0", "task_categories": ["text-generation"]}
2023-11-30T01:26:55+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #language-English #license-apache-2.0 #region-us
---
[]
[ "TAGS\n#task_categories-text-generation #language-English #license-apache-2.0 #region-us \n" ]
[ 29 ]
[ "passage: TAGS\n#task_categories-text-generation #language-English #license-apache-2.0 #region-us \n" ]
bc8519dde97c31462d6d55815c6f3da3a6d538b5
# Dataset Card for "RedPajama-combined-15B-8K-llama" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
awettig/RedPajama-combined-15B-8k-llama
[ "region:us" ]
2023-11-29T05:26:03+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 3181819992, "num_examples": 29874}, {"name": "train", "num_bytes": 188533325564, "num_examples": 1770133}], "download_size": 0, "dataset_size": 191715145556}}
2023-11-30T07:16:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "RedPajama-combined-15B-8K-llama" More Information needed
[ "# Dataset Card for \"RedPajama-combined-15B-8K-llama\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"RedPajama-combined-15B-8K-llama\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"RedPajama-combined-15B-8K-llama\"\n\nMore Information needed" ]
60c4c8e207a82d7ba693767a26a036dc1af7877e
# Dataset Card for "ImageSearch" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jarvisx17/ImageSearch
[ "region:us" ]
2023-11-29T05:26:29+00:00
{"dataset_info": {"features": [{"name": "ID", "dtype": "string"}, {"name": "Filename", "dtype": "string"}, {"name": "link", "dtype": "string"}, {"name": "embeddings", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 168391982, "num_examples": 31196}], "download_size": 135058186, "dataset_size": 168391982}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T05:26:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ImageSearch" More Information needed
[ "# Dataset Card for \"ImageSearch\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ImageSearch\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ImageSearch\"\n\nMore Information needed" ]
2dcdd05ca76ca3033433b78cc696459884d16977
### Load Dataset

```shell
pip install datasets
```

```python
import pandas as pd
from datasets import load_dataset

electricity = load_dataset("worldboss/electricity-prices", split="train")
pd.DataFrame(electricity).head()
```

### Author

The data was constructed by Theophilus Siameh ([email protected]).
worldboss/electricity-prices
[ "task_categories:text-generation", "size_categories:1K<n<10K", "language:en", "license:afl-3.0", "ghana", "electricity", "region:us" ]
2023-11-29T05:46:29+00:00
{"language": ["en"], "license": "afl-3.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation"], "tags": ["ghana", "electricity"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "Oper Day", "dtype": "string"}, {"name": "Hour Ending", "dtype": "int64"}, {"name": "HB_BUSAVG", "dtype": "float64"}, {"name": "HB_HOUSTON", "dtype": "float64"}, {"name": "HB_HUBAVG", "dtype": "float64"}, {"name": "HB_NORTH", "dtype": "float64"}, {"name": "HB_PAN", "dtype": "float64"}, {"name": "HB_SOUTH", "dtype": "float64"}, {"name": "HB_WEST", "dtype": "float64"}, {"name": "LZ_AEN", "dtype": "float64"}, {"name": "LZ_CPS", "dtype": "float64"}, {"name": "LZ_HOUSTON", "dtype": "float64"}, {"name": "LZ_LCRA", "dtype": "float64"}, {"name": "LZ_NORTH", "dtype": "float64"}, {"name": "LZ_RAYBN", "dtype": "float64"}, {"name": "LZ_SOUTH", "dtype": "float64"}, {"name": "LZ_WEST", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 23352, "num_examples": 168}], "download_size": 24020, "dataset_size": 23352}}
2023-11-29T05:52:38+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-1K<n<10K #language-English #license-afl-3.0 #ghana #electricity #region-us
### Load Dataset ### Author The data was constructed by Theophilus Siameh (theodondre@URL).
[ "### Load Dataset", "### Author\nThe data was constructed by Theophilus Siameh (theodondre@URL)." ]
[ "TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #license-afl-3.0 #ghana #electricity #region-us \n", "### Load Dataset", "### Author\nThe data was constructed by Theophilus Siameh (theodondre@URL)." ]
[ 47, 6, 22 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #license-afl-3.0 #ghana #electricity #region-us \n### Load Dataset### Author\nThe data was constructed by Theophilus Siameh (theodondre@URL)." ]
70c47fd6decd9ad79649ff56ca34847b622982a7
This dataset contains almost the entire history of the /r/puertorico subreddit (2012-2021), filtered into comment/reply pairs. Childless comments (comments with no replies) are omitted, and so is the OP content.

Format: ChatML

Disclaimer: Content has not been filtered, so if you look for things that you don't like, you are sure to find them.

TODO:
- add OP to dataset as "top-level" comment.
- create simple completion dataset that includes everything
- clean up dataset. There's still a bunch of bot responses, and removed/deleted comments.
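As a rough illustration, one comment/reply pair rendered in ChatML might look like the sketch below; the role mapping (comment as user, reply as assistant) and the placeholder text are assumptions for demonstration, so check the actual files for the exact layout:

```python
# Illustrative ChatML rendering of one comment/reply pair.
# The role mapping and the text are invented, not taken from the files.
pair = (
    "<|im_start|>user\n"
    "Parent comment text goes here.\n"
    "<|im_end|>\n"
    "<|im_start|>assistant\n"
    "Reply text goes here.\n"
    "<|im_end|>\n"
)
print(pair)
```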
unclegravity/puertorico-reddit
[ "region:us" ]
2023-11-29T05:54:58+00:00
{}
2023-12-16T07:02:06+00:00
[]
[]
TAGS #region-us
This dataset contains almost the entire history of the /r/puertorico subreddit (2012-2021), filtered into comment/reply pairs. Childless comments (comments with no replies) are omitted, and so is the OP content. Format: ChatML Disclaimer: Content has not been filtered, so if you look for things that you don't like, you are sure to find them. TODO: - add OP to dataset as "top-level" comment. - create simple completion dataset that includes everything - clean up dataset. There's still a bunch of bot responses, and removed/deleted comments.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
8a394d4e841485a66e0ae5ec8858f6cdddec3add
Dataset presented in the paper "Table Detection in the Wild: A Novel Diverse Table Detection Dataset and Method": https://arxiv.org/abs/2209.09207

If you use this dataset, please cite this paper:

```
Haloi, M., Shekhar, S., Fande, N., & Dash, S. S. (2022). Table Detection in the Wild: A Novel Diverse Table Detection Dataset and Method. arXiv preprint arXiv:2209.09207.
```
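For convenience, a minimal browsing sketch, assuming the standard `datasets` loader works for this repository (streaming avoids downloading the full ~6 GB archive up front):

```python
from datasets import load_dataset

# Stream the train split so we can look at a sample without a full download.
ds = load_dataset("n3011/STDW", split="train", streaming=True)

first = next(iter(ds))
first["image"].save("stdw_sample.png")  # each record contains a single image
```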
n3011/STDW
[ "license:mit", "arxiv:2209.09207", "region:us" ]
2023-11-29T06:20:59+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 7332835310.55, "num_examples": 7470}], "download_size": 6054347502, "dataset_size": 7332835310.55}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-30T06:22:06+00:00
[ "2209.09207" ]
[]
TAGS #license-mit #arxiv-2209.09207 #region-us
Dataset presented in the paper "Table Detection in the Wild: A Novel Diverse Table Detection Dataset and Method" URL If you use this dataset, please cite this paper:
[]
[ "TAGS\n#license-mit #arxiv-2209.09207 #region-us \n" ]
[ 20 ]
[ "passage: TAGS\n#license-mit #arxiv-2209.09207 #region-us \n" ]
2a1086255d9a8c2df903c79a2324764d648991db
This dataset is a fork of [counsel-chat](https://huggingface.co/datasets/nbertagnolli/counsel-chat) modified to adhere to the alpaca instruction format.

`alpaca`: instruction; input (optional)

```json
{"instruction": "...", "input": "...", "output": "..."}
```
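As a sketch, one common way to render such a record into a single training prompt is the usual community Alpaca template; the exact wording below is that convention, not something mandated by this dataset:

```python
# Render one {'instruction', 'input', 'output'} record as an Alpaca-style prompt.
def to_alpaca_prompt(rec: dict) -> str:
    if rec.get("input"):
        return (
            "Below is an instruction that describes a task, paired with an input "
            "that provides further context. Write a response that appropriately "
            "completes the request.\n\n"
            f"### Instruction:\n{rec['instruction']}\n\n"
            f"### Input:\n{rec['input']}\n\n"
            f"### Response:\n{rec['output']}"
        )
    return (
        "Below is an instruction that describes a task. Write a response that "
        "appropriately completes the request.\n\n"
        f"### Instruction:\n{rec['instruction']}\n\n"
        f"### Response:\n{rec['output']}"
    )

# Hypothetical record, for demonstration only.
demo = {"instruction": "Suggest a grounding exercise.", "input": "", "output": "Try box breathing."}
print(to_alpaca_prompt(demo))
```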
steve-cse/counsel_chat
[ "region:us" ]
2023-11-29T06:37:27+00:00
{}
2024-01-02T15:08:44+00:00
[]
[]
TAGS #region-us
This dataset is a fork of counsel-chat modified to adhere to the alpaca instruction format. 'alpaca': instruction; input(optional)
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
60662f71dafe12fad38d6e2cd2af125668b02043
{ "from": "human", "value": "Your name is Su Wen" }
krisrod/test_name
[ "license:llama2", "region:us" ]
2023-11-29T06:40:49+00:00
{"license": "llama2"}
2023-11-29T06:59:57+00:00
[]
[]
TAGS #license-llama2 #region-us
{ "from": "human", "value": "Your name is Su Wen" }
[]
[ "TAGS\n#license-llama2 #region-us \n" ]
[ 13 ]
[ "passage: TAGS\n#license-llama2 #region-us \n" ]
ef2b1a2a40ac78751a1fc37f377c16ca4a706471
# Dataset Card for "Camera-Blur" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Adi-0-0-Gupta/Camera-Blur
[ "region:us" ]
2023-11-29T06:47:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "image", "struct": [{"name": "bytes", "dtype": "binary"}, {"name": "path", "dtype": "null"}]}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 357658018, "num_examples": 288}, {"name": "valid", "num_bytes": 79675207, "num_examples": 79}], "download_size": 431693779, "dataset_size": 437333225}}
2023-12-14T08:04:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Camera-Blur" More Information needed
[ "# Dataset Card for \"Camera-Blur\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Camera-Blur\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Camera-Blur\"\n\nMore Information needed" ]
9107887a43766c770fe3e639ecab55fe08245e72
## Sentence Translation Instruction Dataset

This dataset contains **machine translation** data, as well as data for **translation between Classical Chinese (文言文) and Vernacular Chinese (白话文)**.

While working on [qgyd2021/few_shot_intent_sft](https://huggingface.co/datasets/qgyd2021/few_shot_intent_sft), I realized that the model may also need translation ability so that knowledge can be transferred between languages, so I decided to build this dataset.
qgyd2021/few_shot_translation_sft
[ "task_categories:question-answering", "task_categories:translation", "task_categories:conversational", "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100M<n<1B", "language:zh", "language:en", "license:apache-2.0", "region:us" ]
2023-11-29T06:50:22+00:00
{"language": ["zh", "en"], "license": "apache-2.0", "size_categories": ["100M<n<1B"], "task_categories": ["question-answering", "translation", "conversational", "text-generation", "text2text-generation"]}
2023-11-29T06:54:23+00:00
[]
[ "zh", "en" ]
TAGS #task_categories-question-answering #task_categories-translation #task_categories-conversational #task_categories-text-generation #task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #language-English #license-apache-2.0 #region-us
## Sentence Translation Instruction Dataset This dataset contains machine translation data, as well as data for translation between Classical Chinese (文言文) and Vernacular Chinese (白话文). While working on qgyd2021/few_shot_intent_sft, I realized that the model may also need translation ability so that knowledge can be transferred between languages, so I decided to build this dataset.
[ "## 句子翻译指令数据集\n\n其中包含机器翻译数据集,也包含汉语文言文与白话文之间的翻译数据集。\n\n在做qgyd2021/few_shot_intent_sft时,我意识到可能需要同时让模型具有翻译的能力以实现知识在不同语言之间的传递,因此决定制作此数据集。" ]
[ "TAGS\n#task_categories-question-answering #task_categories-translation #task_categories-conversational #task_categories-text-generation #task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #language-English #license-apache-2.0 #region-us \n", "## 句子翻译指令数据集\n\n其中包含机器翻译数据集,也包含汉语文言文与白话文之间的翻译数据集。\n\n在做qgyd2021/few_shot_intent_sft时,我意识到可能需要同时让模型具有翻译的能力以实现知识在不同语言之间的传递,因此决定制作此数据集。" ]
[ 90, 76 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-translation #task_categories-conversational #task_categories-text-generation #task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #language-English #license-apache-2.0 #region-us \n## 句子翻译指令数据集\n\n其中包含机器翻译数据集,也包含汉语文言文与白话文之间的翻译数据集。\n\n在做qgyd2021/few_shot_intent_sft时,我意识到可能需要同时让模型具有翻译的能力以实现知识在不同语言之间的传递,因此决定制作此数据集。" ]
af366dc5d172c42d24253d76cce452bcb74a320e
This is the dataset of *Moiré Photo Restoration Using Multiresolution Convolutional Neural Networks*.
zxbsmk/TIP-2018
[ "task_categories:image-to-image", "size_categories:100K<n<1M", "language:en", "license:mit", "region:us" ]
2023-11-29T06:51:44+00:00
{"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["image-to-image"], "viewer": false}
2023-12-01T13:24:43+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #size_categories-100K<n<1M #language-English #license-mit #region-us
This is the dataset of *Moiré Photo Restoration Using Multiresolution Convolutional Neural Networks*.
[]
[ "TAGS\n#task_categories-image-to-image #size_categories-100K<n<1M #language-English #license-mit #region-us \n" ]
[ 39 ]
[ "passage: TAGS\n#task_categories-image-to-image #size_categories-100K<n<1M #language-English #license-mit #region-us \n" ]
31aae750c0ac578e71d3e6b8afd390f285f50792
The implicit toxicity data used in our paper: > Unveiling Implicit Toxicity in Large Language Models.
jiaxin-wen/Implicit-Toxicity
[ "region:us" ]
2023-11-29T07:06:37+00:00
{}
2023-11-29T07:31:32+00:00
[]
[]
TAGS #region-us
The implicit toxicity data used in our paper: > Unveiling Implicit Toxicity in Large Language Models.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
a422effc987977b876ac71750928d6eee5652416
Word-level images for 5 languages (En, Fr, De, Zh, Cs)
BrianFu1105/Word-levelImage
[ "region:us" ]
2023-11-29T07:23:05+00:00
{}
2023-11-29T07:39:38+00:00
[]
[]
TAGS #region-us
Word-level images for 5 languages (En, Fr, De, Zh, Cs)
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
3567527737430becd85eddc9d838f7f083216b39
# MultiFactor-HotpotQA-SuppFacts <!-- Provide a quick summary of the dataset. --> The MultiFactor datasets -- HotpotQA-Supporting Facts part in EMNLP 2023 Findings: [*Improving Question Generation with Multi-level Content Planning*](https://arxiv.org/abs/2310.13512). ## 1. Dataset Details ### 1.1 Dataset Description Supporting Facts setting on HotpotQA dataset [1] in EMNLP 2023 Findings: [*Improving Question Generation with Multi-level Content Planning*](https://arxiv.org/abs/2310.13512). Based on the dataset provided in [CQG](https://github.com/sion-zcfei/cqg) [2], we add the `p_phrase`, `n_phrase` and `full answer` attributes for every dataset instance. The full answer is reconstructed with [QA2D](https://github.com/kelvinguu/qanli) [3]. More details are in the paper's GitHub repo: https://github.com/zeaver/MultiFactor. ### 1.2 Dataset Sources <!-- Provide the basic links for the dataset. --> - **Repository:** https://github.com/zeaver/MultiFactor - **Paper:** [*Improving Question Generation with Multi-level Content Planning*](https://arxiv.org/abs/2310.13512). EMNLP Findings, 2023. ## 2. Dataset Structure ```tex . ├── dev.json ├── test.json ├── train.json ├── fa_model_inference ├── dev.json ├── test.json └── train.json ``` Each split is a JSON file, not JSONL. Please load it with `json.load(f)` directly. The dataset schema is: ```json { "context": "the given input context", "answer": "the given answer", "question": "the corresponding question", "p_phrase": "the positive phrases in the given context", "n_phrase": "the negative phrases", "full answer": "pseudo-gold full answer (q + a -> a declarative sentence)" } ``` We also provide the *FA_Model*'s inference results in `fa_model_inference/{split}.json`. ## 3. Dataset Card Contact If you have any questions, feel free to contact me: [email protected] ## Reference [1] Yang, Zhilin, et al. [HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering](https://arxiv.org/abs/1809.09600). EMNLP, 2018. [2] Fei, Zichu, et al. [CQG: A Simple and Effective Controlled Generation Framework for Multi-Hop Question Generation](https://aclanthology.org/2022.acl-long.475/). ACL, 2022. [3] Demszky, Dorottya, et al. [Transforming Question Answering Datasets Into Natural Language Inference Datasets](https://arxiv.org/abs/1809.02922). Stanford University. arXiv, 2018.
zeaver/multifactor_hotpotqa_suppfacts
[ "task_categories:text-generation", "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "license:mit", "question-generation", "HotpotQA", "arxiv:2310.13512", "arxiv:1809.09600", "arxiv:1809.02922", "region:us" ]
2023-11-29T07:44:10+00:00
{"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation", "question-answering"], "tags": ["question-generation", "HotpotQA"]}
2023-11-29T12:01:23+00:00
[ "2310.13512", "1809.09600", "1809.02922" ]
[ "en" ]
TAGS #task_categories-text-generation #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-mit #question-generation #HotpotQA #arxiv-2310.13512 #arxiv-1809.09600 #arxiv-1809.02922 #region-us
# MultiFactor-HotpotQA-SuppFacts The MultiFactor datasets -- HotpotQA-Supporting Facts part in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*. ## 1. Dataset Details ### 1.1 Dataset Description Supporting Facts setting on HotpotQA dataset [1] in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*. Based on the dataset provided in CQG [2], we add the 'p_phrase', 'n_phrase' and 'full answer' attributes for every dataset instance. The full answer is reconstructed with QA2D [3]. More details are in the paper's GitHub repo: URL ### 1.2 Dataset Sources - Repository: URL - Paper: *Improving Question Generation with Multi-level Content Planning*. EMNLP Findings, 2023. ## 2. Dataset Structure Each split is a JSON file, not JSONL. Please load it with 'json.load(f)' directly. The dataset schema is: We also provide the *FA_Model*'s inference results in 'fa_model_inference/{split}.json'. ## 3. Dataset Card Contact If you have any questions, feel free to contact me: zehua.xia1999@URL ## Reference [1] Yang, Zhilin, et al. HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering. EMNLP, 2018. [2] Fei, Zichu, et al. CQG: A Simple and Effective Controlled Generation Framework for Multi-Hop Question Generation. ACL, 2022. [3] Demszky, Dorottya, et al. Transforming Question Answering Datasets Into Natural Language Inference Datasets. Stanford University. arXiv, 2018.
[ "# MultiFactor-HotpotQA-SuppFacts\n\n\n\nThe MultiFactor datasets -- HotpotQA-Supporting Facts part in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*.", "## 1. Dataset Details", "### 1.1 Dataset Description\n\nSupporting Facts setting on HotpotQA dataset [1] in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*. \n\nBased on the dataset provided in CQG [2], we add the 'p_hrase', 'n_phrase' and 'full answer' attributes for every dataset instance. \nThe full answer is reconstructed with QA2D [3]. More details are in paper github: URL", "### 1.2 Dataset Sources\n\n\n\n- Repository: URL\n- Paper: *Improving Question Generation with Multi-level Content Planning*. EMNLP Findings, 2023.", "## 2. Dataset Structure\n\n\nEach split is a json file, not jsonl. Please load it with 'URL(f)' directly. And the dataset schema is:\n\n\n\nWe also provide the *FA_Model*'s inference results in 'fa_model_inference/{split}.json'.", "## 3. Dataset Card Contact\n\nIf you have any question, feel free to contact with me: zehua.xia1999@URL", "## Reference\n[1] Yang, Zhilin, et al. HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering. EMNLP, 2018.\n\n[2] Fei, Zichu, et al. CQG: A Simple and Effective Controlled Generation Framework for Multi-Hop Question Generation. ACL, 2022.\n\n[3] Demszky, Dorottya, et al. Transforming Question Answering Datasets Into Natural Language Inference Datasets. Stanford University. arXiv, 2018." ]
[ "TAGS\n#task_categories-text-generation #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-mit #question-generation #HotpotQA #arxiv-2310.13512 #arxiv-1809.09600 #arxiv-1809.02922 #region-us \n", "# MultiFactor-HotpotQA-SuppFacts\n\n\n\nThe MultiFactor datasets -- HotpotQA-Supporting Facts part in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*.", "## 1. Dataset Details", "### 1.1 Dataset Description\n\nSupporting Facts setting on HotpotQA dataset [1] in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*. \n\nBased on the dataset provided in CQG [2], we add the 'p_hrase', 'n_phrase' and 'full answer' attributes for every dataset instance. \nThe full answer is reconstructed with QA2D [3]. More details are in paper github: URL", "### 1.2 Dataset Sources\n\n\n\n- Repository: URL\n- Paper: *Improving Question Generation with Multi-level Content Planning*. EMNLP Findings, 2023.", "## 2. Dataset Structure\n\n\nEach split is a json file, not jsonl. Please load it with 'URL(f)' directly. And the dataset schema is:\n\n\n\nWe also provide the *FA_Model*'s inference results in 'fa_model_inference/{split}.json'.", "## 3. Dataset Card Contact\n\nIf you have any question, feel free to contact with me: zehua.xia1999@URL", "## Reference\n[1] Yang, Zhilin, et al. HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering. EMNLP, 2018.\n\n[2] Fei, Zichu, et al. CQG: A Simple and Effective Controlled Generation Framework for Multi-Hop Question Generation. ACL, 2022.\n\n[3] Demszky, Dorottya, et al. Transforming Question Answering Datasets Into Natural Language Inference Datasets. Stanford University. arXiv, 2018." ]
[ 87, 56, 5, 105, 39, 72, 26, 112 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-mit #question-generation #HotpotQA #arxiv-2310.13512 #arxiv-1809.09600 #arxiv-1809.02922 #region-us \n# MultiFactor-HotpotQA-SuppFacts\n\n\n\nThe MultiFactor datasets -- HotpotQA-Supporting Facts part in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*.## 1. Dataset Details### 1.1 Dataset Description\n\nSupporting Facts setting on HotpotQA dataset [1] in EMNLP 2023 Findings: *Improving Question Generation with Multi-level Content Planning*. \n\nBased on the dataset provided in CQG [2], we add the 'p_hrase', 'n_phrase' and 'full answer' attributes for every dataset instance. \nThe full answer is reconstructed with QA2D [3]. More details are in paper github: URL### 1.2 Dataset Sources\n\n\n\n- Repository: URL\n- Paper: *Improving Question Generation with Multi-level Content Planning*. EMNLP Findings, 2023.## 2. Dataset Structure\n\n\nEach split is a json file, not jsonl. Please load it with 'URL(f)' directly. And the dataset schema is:\n\n\n\nWe also provide the *FA_Model*'s inference results in 'fa_model_inference/{split}.json'.## 3. Dataset Card Contact\n\nIf you have any question, feel free to contact with me: zehua.xia1999@URL## Reference\n[1] Yang, Zhilin, et al. HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering. EMNLP, 2018.\n\n[2] Fei, Zichu, et al. CQG: A Simple and Effective Controlled Generation Framework for Multi-Hop Question Generation. ACL, 2022.\n\n[3] Demszky, Dorottya, et al. Transforming Question Answering Datasets Into Natural Language Inference Datasets. Stanford University. arXiv, 2018." ]
b01cf1cc74db572ca45315001c122837e8095068
### Dataset description This dataset contains both English and Vietnamese samples. Each sample has 3 solutions including a coding solution, an explanation solution and a direct solution. They are distinguished by the tokens \<sol\>, \<exp\> and \<dir\>, respectively. For the coding solution, execute the Python code wrapped inside ``` for the output result. DON'T TRUST the choice generated by the model after coding; just execute the code and map it to the choice. With this dataset & QLoRA on Tora-Code-13b, you can achieve 73% accuracy in the public test of Vietnamese Elementary Maths - Zalo AI Challenge 2023.
thanhdath/vietnamese-mathcoder
[ "region:us" ]
2023-11-29T07:45:53+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21151768, "num_examples": 22825}], "download_size": 7937515, "dataset_size": 21151768}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-25T14:55:11+00:00
[]
[]
TAGS #region-us
### Dataset description This dataset contains both English and Vietnamese samples. Each sample has 3 solutions including a coding solution, an explanation solution and a direct solution. They are distinguished by the tokens \<sol\>, \<exp\> and \<dir\>, respectively. For the coding solution, execute the Python code wrapped inside ''' for the output result. DON'T TRUST the choice generated by the model after coding; just execute the code and map it to the choice. With this dataset & QLoRA on Tora-Code-13b, you can achieve 73% accuracy in the public test of Vietnamese Elementary Maths - Zalo AI Challenge 2023.
[ "### Dataset description\nThis dataset contains both English and Vietnamese samples.\n\nEach sample has 3 solutions including a coding solution, an explanation solution and a direct solution. They are distinguished by the tokens \\<sol\\>, \\<exp\\> and \\<dir\\>, respectively.\n\nFor coding solution, execute the Python code wrapped inside ''' for the output result. DON'T TRUST the choice generated by model after coding, just execute the code and map it to the choice.\n\nWith this dataset & QLoRA on Tora-Code-13b, you can archieve 73% accuracy in the public test of Vietnamese Elemetary Maths - Zalo AI Challenge 2023." ]
[ "TAGS\n#region-us \n", "### Dataset description\nThis dataset contains both English and Vietnamese samples.\n\nEach sample has 3 solutions including a coding solution, an explanation solution and a direct solution. They are distinguished by the tokens \\<sol\\>, \\<exp\\> and \\<dir\\>, respectively.\n\nFor coding solution, execute the Python code wrapped inside ''' for the output result. DON'T TRUST the choice generated by model after coding, just execute the code and map it to the choice.\n\nWith this dataset & QLoRA on Tora-Code-13b, you can archieve 73% accuracy in the public test of Vietnamese Elemetary Maths - Zalo AI Challenge 2023." ]
[ 6, 161 ]
[ "passage: TAGS\n#region-us \n### Dataset description\nThis dataset contains both English and Vietnamese samples.\n\nEach sample has 3 solutions including a coding solution, an explanation solution and a direct solution. They are distinguished by the tokens \\<sol\\>, \\<exp\\> and \\<dir\\>, respectively.\n\nFor coding solution, execute the Python code wrapped inside ''' for the output result. DON'T TRUST the choice generated by model after coding, just execute the code and map it to the choice.\n\nWith this dataset & QLoRA on Tora-Code-13b, you can archieve 73% accuracy in the public test of Vietnamese Elemetary Maths - Zalo AI Challenge 2023." ]
4f2091a95a40ac335c99199ee241ef220122142f
Notebook: https://www.kaggle.com/code/hasanmesbaulalitaher/tamil-voice-dataset-preparation/notebook
Hasan-Mesbaul-420/Tamil_speech
[ "region:us" ]
2023-11-29T07:58:13+00:00
{"dataset_info": {"features": [{"name": "path", "dtype": "string"}, {"name": "array", "sequence": "float64"}, {"name": "sampling_rate", "dtype": "int64"}, {"name": "Text File Path", "dtype": "string"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2280662818, "num_examples": 908}, {"name": "test", "num_bytes": 675141227, "num_examples": 351}], "download_size": 3191937492, "dataset_size": 2955804045}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-12-12T01:48:23+00:00
[]
[]
TAGS #region-us
Notebook: URL
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
d1797f4357fcaafd237c3c930966ae8aed9c6200
## Data Description With the rapid development of large language models, the demand for high-quality datasets in both the industry and academia is growing. These datasets not only need to contain a vast amount of information but also require rigorous screening and cleaning to ensure their accuracy and the safety of downstream models and applications. However, the currently popular public datasets in the industry have certain quality and security risks, especially in the Chinese domain where high-quality datasets are particularly lacking. Moreover, constructing a safe Chinese dataset also faces many challenges. Therefore, building a dataset that has undergone strict screening and standardized processing is particularly important for the innovation and development of LLMs. Our CCI (Chinese Corpora Internet) dataset consists of high-quality, trustworthy sources from internet sites within mainland China. It has undergone rigorous data cleaning and deduplication, with targeted detection and filtering in aspects of content quality. The rules for data processing include: - Rule-based filtering: density-based extraction, keyword filtering, spam information filtering, conversion between simplified and traditional Chinese, etc. - Model-based filtering: filtering of low-quality content by training a classification model - Deduplication: within and between datasets Additionally, in response to the issue of pre-training data being large in scale and prone to causing leaks of evaluation data, we specifically conduct rigorous screening and filtering of several current mainstream Chinese evaluation datasets during the data processing phase. The CCI corpus released (CCI v1.0.0) is 104GB in size. The overall timespan of the dataset ranges from January 2001 to November 2023. ## Update - November 29, 2023, CCI v1.0.0 released! ## Data Format | Field | Type | Meaning | | :-: | :-: | :-: | | id | String | Document ID, globally unique | | title | String | Document title | | content | String | Content of the document | ## Sample ```json { "id": "a262c26c915762ae107019f2797fda03", "title": "深圳人工智能企业闪耀东京展会", "content": "拳头产品叫好又叫座 深圳人工智能企业闪耀东京展会 近日在东京举行的日本人工智能展上,由深圳市贸促委组织的深圳人工智能企业展团集中亮相,引起热烈关注。该展会是日本规模最大的人工智能展会,云鲸智能、思谋科技、魔耳智能、格瑞普电池、云译科技等近20家深圳人工智能代表性企业的最新人工智能产品吸引了众多当地专业观众的目光,成为展会上的一抹亮色。企业现场“揽单”,参展成果丰硕深圳市大象机器人科技有限公司是一家由海外留学人才来深创建的专注于机器人研发生产的专精特新企业,本次在东京,该公司重点展示了myCobot协作机器人和仿真宠物猫metacat等公司拳头产品。“参展期间我们接待客户数达到500位以上,有意愿成为分销伙伴、集成商或终端客户的有效意向客户近70人,成效相当不错。……" } ``` ## Download The CCI dataset is simultaneously open-sourced on the [BAAI DataHub](https://data.baai.ac.cn/data) and Huggingface. ### BAAI DataHub Users can click the link [CCI Dataset](https://data.baai.ac.cn/details/BAAI-CCI) to view the data files, and click to download. Note that users need to register on BAAI DataHub to use the data, and filling out a survey questionnaire is required before their first download. ### Huggingface To use the data, you can load it using the following code: ```python from datasets import load_dataset # If the dataset is gated/private, make sure you have run huggingface-cli login dataset = load_dataset("BAAI/CCI-Data") ``` ## User Agreement Users need to comply with the usage agreement of the CCI dataset. You can view the agreement by clicking on the following link: ([View Usage Agreement](https://data.baai.ac.cn/resources/agreement/cci_usage_aggrement.pdf)). ## Notice If you have any questions related to this dataset, please contact [email protected].
BAAI/CCI-Data
[ "task_categories:text-generation", "size_categories:10M<n<100M", "language:zh", "region:us" ]
2023-11-29T08:10:39+00:00
{"language": ["zh"], "size_categories": ["10M<n<100M"], "task_categories": ["text-generation"]}
2023-11-29T08:23:42+00:00
[]
[ "zh" ]
TAGS #task_categories-text-generation #size_categories-10M<n<100M #language-Chinese #region-us
Data Description ---------------- With the rapid development of large language models, the demand for high-quality datasets in both the industry and academia is growing. These datasets not only need to contain a vast amount of information but also require rigorous screening and cleaning to ensure their accuracy and the safety of downstream models and applications. However, the currently popular public datasets in the industry have certain quality and security risks, especially in the Chinese domain where high-quality datasets are particularly lacking. Moreover, constructing a safe Chinese dataset also faces many challenges. Therefore, building a dataset that has undergone strict screening and standardized processing is particularly important for the innovation and development of LLMs. Our CCI (Chinese Corpora Internet) dataset consists of high-quality, trustworthy sources from internet sites within mainland China. It has undergone rigorous data cleaning and deduplication, with targeted detection and filtering in aspects of content quality. The rules for data processing include: * Rule-based filtering: density-based extraction, keyword filtering, spam information filtering, conversion between simplified and traditional Chinese, etc. * Model-based filtering: filtering of low-quality content by training a classification model * Deduplication: within and between datasets Additionally, in response to the issue of pre-training data being large in scale and prone to causing leaks of evaluation data, we specifically conduct rigorous screening and filtering of several current mainstream Chinese evaluation datasets during the data processing phase. The CCI corpus released (CCI v1.0.0) is 104GB in size. The overall timespan of the dataset ranges from January 2001 to November 2023. Update ------ * November 29, 2023, CCI v1.0.0 released! Data Format ----------- Sample ------ Download -------- The CCI dataset is simultaneously open-sourced on the BAAI DataHub and Huggingface.
[ "### BAAI DataHub\n\n\nUsers can click the link CCI Dataset to view the data files, and click to download.\n\n\nNote that users need to register on BAAI DataHub to use the data, and filling out a survey questionnaire is required before their first download.", "### Huggingface\n\n\nTo use the data, you can load it using the following code:\n\n\nUser Agreement\n--------------\n\n\nUsers need to comply with the usage agreement of the CCI dataset. You can view the agreement by clicking on the following link: (View Usage Agreement).\n\n\nNotice\n------\n\n\nIf you have any questions related to this dataset, please contact data@URL." ]
[ "TAGS\n#task_categories-text-generation #size_categories-10M<n<100M #language-Chinese #region-us \n", "### BAAI DataHub\n\n\nUsers can click the link CCI Dataset to view the data files, and click to download.\n\n\nNote that users need to register on BAAI DataHub to use the data, and filling out a survey questionnaire is required before their first download.", "### Huggingface\n\n\nTo use the data, you can load it using the following code:\n\n\nUser Agreement\n--------------\n\n\nUsers need to comply with the usage agreement of the CCI dataset. You can view the agreement by clicking on the following link: (View Usage Agreement).\n\n\nNotice\n------\n\n\nIf you have any questions related to this dataset, please contact data@URL." ]
[ 34, 58, 80 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-10M<n<100M #language-Chinese #region-us \n### BAAI DataHub\n\n\nUsers can click the link CCI Dataset to view the data files, and click to download.\n\n\nNote that users need to register on BAAI DataHub to use the data, and filling out a survey questionnaire is required before their first download.### Huggingface\n\n\nTo use the data, you can load it using the following code:\n\n\nUser Agreement\n--------------\n\n\nUsers need to comply with the usage agreement of the CCI dataset. You can view the agreement by clicking on the following link: (View Usage Agreement).\n\n\nNotice\n------\n\n\nIf you have any questions related to this dataset, please contact data@URL." ]
c44d705fe0cb1ab0d3edaa257c9a10571969efaa
# Bangumi Image Base of Durarara!! This is the image base of bangumi Durarara!!; we detected 78 characters, 7823 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1153 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 159 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 185 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 224 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 177 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 237 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 267 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 67 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 38 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 129 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 375 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 34 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 75 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 33 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 24 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 177 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 112 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 124 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 142 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 133 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 132 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 87 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 34 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 161 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 440 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 297 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 62 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 34 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 48 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 68 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 31 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 106 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 62 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 82 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 20 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 64 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 37 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 83 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 39 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 21 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 59 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 31 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 653 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 23 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | 
![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 120 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 22 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 22 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 36 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 162 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 11 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 64 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 8 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 13 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 19 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 171 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 14 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 
2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 16 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 19 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 10 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 64 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 30 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 16 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 30 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 18 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 26 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 9 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 21 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 
8](66/preview_8.png) | | 67 | 12 | [Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 22 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) | | 69 | 28 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 13 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) | | 71 | 6 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | N/A | N/A | | 72 | 6 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | N/A | N/A | | 73 | 17 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) | | 74 | 10 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | ![preview 7](74/preview_7.png) | ![preview 8](74/preview_8.png) | | 75 | 12 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | ![preview 7](75/preview_7.png) | ![preview 8](75/preview_8.png) | | 76 | 6 | [Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | N/A | N/A | | noise | 231 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/durarara
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-11-29T08:18:01+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-11-29T12:43:52+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Durarara!! ================================ This is the image base of bangumi Durarara!!; we detected 78 characters, 7823 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
4b6ed414b340a8a7e3bc975dfecdf6b3f80cd54c
# Dataset Card for "capstone_fromgpt_without_gold_v7" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/capstone_fromgpt_without_gold_v7
[ "region:us" ]
2023-11-29T08:21:29+00:00
{"dataset_info": {"features": [{"name": "dialog_id", "dtype": "int64"}, {"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gold_tags", "dtype": "string"}, {"name": "gpt_success", "dtype": "bool"}, {"name": "gpt_response", "dtype": "string"}, {"name": "gold_tags_tokens_count", "dtype": "int64"}, {"name": "GPT_TAGS_FOUND", "dtype": "bool"}, {"name": "gpt_output_tags", "dtype": "string"}, {"name": "gpt_output_tag_tokens_count", "dtype": "int64"}, {"name": "GPT_MI_FOUND", "dtype": "bool"}, {"name": "gpt_tags_token_count", "dtype": "int64"}, {"name": "gpt_tags", "dtype": "string"}, {"name": "tag_token_count_match", "dtype": "bool"}], "splits": [{"name": "test", "num_bytes": 21303, "num_examples": 12}], "download_size": 23320, "dataset_size": 21303}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-11-29T08:21:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "capstone_fromgpt_without_gold_v7" More Information needed
[ "# Dataset Card for \"capstone_fromgpt_without_gold_v7\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"capstone_fromgpt_without_gold_v7\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"capstone_fromgpt_without_gold_v7\"\n\nMore Information needed" ]
149f378f5b6a2721ca8aaeac0b3aa59c81de6959
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
akramovic/american-airlines-hr
[ "region:us" ]
2023-11-29T08:33:12+00:00
{}
2023-11-29T08:53:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
2b29cea241ee1722b6ffffaa3f00dfe9bad207d5
# Dataset Card for "gsm8k-llama-2-7b-sft-dpo" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tianduo/gsm8k-llama-2-7b-sft-dpo
[ "region:us" ]
2023-11-29T08:34:40+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "gold_ans", "dtype": "string"}, {"name": "positives", "sequence": "string"}, {"name": "negatives", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 12187984, "num_examples": 7473}], "download_size": 5539563, "dataset_size": 12187984}}
2023-11-29T08:34:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gsm8k-llama-2-7b-sft-dpo" More Information needed
[ "# Dataset Card for \"gsm8k-llama-2-7b-sft-dpo\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gsm8k-llama-2-7b-sft-dpo\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gsm8k-llama-2-7b-sft-dpo\"\n\nMore Information needed" ]
6ef26cf57f1ee37294b30f349363c87b98f4dd3f
# Dataset Card for "capstone_fromgpt_without_gold_v8" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/capstone_fromgpt_without_gold_v8
[ "region:us" ]
2023-11-29T08:38:45+00:00
{"dataset_info": {"features": [{"name": "dialog_id", "dtype": "int64"}, {"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gold_tags", "dtype": "string"}, {"name": "gpt_success", "dtype": "bool"}, {"name": "gpt_response", "dtype": "string"}, {"name": "gold_tags_tokens_count", "dtype": "int64"}, {"name": "GPT_TAGS_FOUND", "dtype": "bool"}, {"name": "gpt_output_tags", "dtype": "string"}, {"name": "gpt_output_tag_tokens_count", "dtype": "int64"}, {"name": "GPT_MI_FOUND", "dtype": "bool"}, {"name": "gpt_tags_token_count", "dtype": "int64"}, {"name": "gpt_tags", "dtype": "string"}, {"name": "tag_token_count_match", "dtype": "bool"}], "splits": [{"name": "test", "num_bytes": 14476, "num_examples": 12}], "download_size": 17211, "dataset_size": 14476}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-11-29T08:38:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "capstone_fromgpt_without_gold_v8" More Information needed
[ "# Dataset Card for \"capstone_fromgpt_without_gold_v8\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"capstone_fromgpt_without_gold_v8\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"capstone_fromgpt_without_gold_v8\"\n\nMore Information needed" ]
2c1859d89e0d3dc175e609246bbb10f4e33cea5e
# Dataset Card for "capstone_fromgpt_without_gold_v9" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/capstone_fromgpt_without_gold_v9
[ "region:us" ]
2023-11-29T08:52:50+00:00
{"dataset_info": {"features": [{"name": "dialog_id", "dtype": "int64"}, {"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gold_tags", "dtype": "string"}, {"name": "gpt_success", "dtype": "bool"}, {"name": "gpt_response", "dtype": "string"}, {"name": "gold_tags_tokens_count", "dtype": "int64"}, {"name": "GPT_TAGS_FOUND", "dtype": "bool"}, {"name": "gpt_output_tags", "dtype": "string"}, {"name": "gpt_output_tag_tokens_count", "dtype": "int64"}, {"name": "GPT_MI_FOUND", "dtype": "bool"}, {"name": "gpt_tags_token_count", "dtype": "int64"}, {"name": "gpt_tags", "dtype": "string"}, {"name": "tag_token_count_match", "dtype": "bool"}], "splits": [{"name": "test", "num_bytes": 14648, "num_examples": 12}], "download_size": 17112, "dataset_size": 14648}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-11-29T08:52:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "capstone_fromgpt_without_gold_v9" More Information needed
[ "# Dataset Card for \"capstone_fromgpt_without_gold_v9\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"capstone_fromgpt_without_gold_v9\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"capstone_fromgpt_without_gold_v9\"\n\nMore Information needed" ]
13d77438d779558bbea3cbdb0195e37691bd3275
# Dataset Card for "ultrachat-75k-vi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nguyenthanhdo/ultrachat-75k-vi
[ "region:us" ]
2023-11-29T08:59:26+00:00
{"dataset_info": {"features": [{"name": "prompt_id", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train_sft", "num_bytes": 382341861, "num_examples": 74465}], "download_size": 174722122, "dataset_size": 382341861}, "configs": [{"config_name": "default", "data_files": [{"split": "train_sft", "path": "data/train_sft-*"}]}]}
2023-11-29T08:59:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ultrachat-75k-vi" More Information needed
[ "# Dataset Card for \"ultrachat-75k-vi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ultrachat-75k-vi\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ultrachat-75k-vi\"\n\nMore Information needed" ]
b15636dccdde3c1865dbc72ca3d9fcdcdadd9eda
# Dataset Card for "contracts_v9" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
paul-w-qs/contracts_v9
[ "region:us" ]
2023-11-29T09:07:06+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "JSON_LABEL", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 84923820.0, "num_examples": 514}], "download_size": 84024431, "dataset_size": 84923820.0}}
2023-11-29T23:08:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "contracts_v9" More Information needed
[ "# Dataset Card for \"contracts_v9\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"contracts_v9\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"contracts_v9\"\n\nMore Information needed" ]
dcd0aa0f069c2d266d9813d40dc8e6ba167d9090
CURRENTLY UNDER CONSTRUCTION English Readme: Bridging Language Barriers and Empowering Marginalized Communities by Increasing Open Portuguese-Language Education Text Training Data: In the realm of natural language processing (NLP), the availability of high-quality language data is crucial for developing effective NLP models. However, marginalized indigenous communities, particularly those in Brazil and the Amazon Rainforest, often lack access to NLP tools and resources, hindering their ability to fully engage in the digital age. The texts include Portuguese language learning textbooks, Portuguese-language educational materials, important historical texts in the Portuguese language (especially from the Global South), and supplemental examples of high repute. Texts with poetic and educational phrasing have been prioritized in this collection.
Solshine/Portuguese_Language_Education_Texts
[ "license:mit", "region:us" ]
2023-11-29T09:31:53+00:00
{"license": "mit"}
2023-11-29T09:40:55+00:00
[]
[]
TAGS #license-mit #region-us
CURRENTLY UNDER CONSTRUCTION English Readme: Bridging Language Barriers and Empowering Marginalized Communities by Increasing Open Portuguese-Language Education Text Training Data: In the realm of natural language processing (NLP), the availability of high-quality language data is crucial for developing effective NLP models. However, marginalized indigenous communities, particularly those in Brazil and the Amazon Rainforest, often lack access to NLP tools and resources, hindering their ability to fully engage in the digital age. The texts include Portuguese language learning textbooks, Portuguese-language educational materials, important historical texts in the Portuguese language (especially from the Global South), and supplemental examples of high repute. Texts with poetic and educational phrasing have been prioritized in this collection.
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
59d47be65296b92865454f04e5645e37a40331e8
# Dataset Card for "capstone_fromgpt_without_gold_v9_all" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/capstone_fromgpt_without_gold_v9_all
[ "region:us" ]
2023-11-29T09:55:19+00:00
{"dataset_info": {"features": [{"name": "dialog_id", "dtype": "int64"}, {"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gold_tags", "dtype": "string"}, {"name": "gpt_success", "dtype": "bool"}, {"name": "gpt_response", "dtype": "string"}, {"name": "gold_tags_tokens_count", "dtype": "int64"}, {"name": "GPT_TAGS_FOUND", "dtype": "bool"}, {"name": "gpt_output_tags", "dtype": "string"}, {"name": "gpt_output_tag_tokens_count", "dtype": "int64"}, {"name": "GPT_MI_FOUND", "dtype": "bool"}, {"name": "gpt_tags_token_count", "dtype": "int64"}, {"name": "gpt_tags", "dtype": "string"}, {"name": "tag_token_count_match", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 124191, "num_examples": 76}, {"name": "validation", "num_bytes": 23023, "num_examples": 12}, {"name": "test", "num_bytes": 14536, "num_examples": 12}], "download_size": 82277, "dataset_size": 161750}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-29T09:55:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "capstone_fromgpt_without_gold_v9_all" More Information needed
[ "# Dataset Card for \"capstone_fromgpt_without_gold_v9_all\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"capstone_fromgpt_without_gold_v9_all\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"capstone_fromgpt_without_gold_v9_all\"\n\nMore Information needed" ]
b97191a697e7e0255303c9461b58541c96fab4ee
# Turkish Event Detection Tweet Dataset: BilTweetNews

The dataset contains tweets related to six major events from Turkish news sources between May 4, 2015 and Jan 8, 2017.

There are 7 event classes:

- E1: May 25, 2015. One of the most popular football clubs in Turkey, Galatasaray, wins the 2015 Turkish Super League.
- E2: Sep 6, 2015. A terrorist group, called PKK, attacked soldiers in Dağlıca, a village in southeastern Turkey.
- E3: Oct 7, 2015. A Turkish scientist, Aziz Sancar, won the 2015 Nobel Prize in Chemistry for his studies on DNA repair.
- E4: May 27, 2016. A local football club from Alanya was promoted to the Turkish Super League for the first time in their history.
- E5: Jun 17, 2016. A traditional anthem that is mostly played by secularists in Turkey, called the 10th Year Anthem, was banned in schools by the director of national education in the Black Sea province of Bolu.
- E6: Oct 17, 2016. A magazine program host mistakenly claimed that Madonna in a Fur Coat, a book written in 1943 by the celebrated Turkish writer Sabahattin Ali, was about pop star Madonna’s life. The book tells the story of a Turkish student and a German singer after World War I.
- Other: Not related to any news topic.

For each event, 100 related-candidate and 60 unrelated-candidate tweets are selected. Lastly, we randomly select 40 tweets that are potentially not related at all; 5 of these are later removed as near-duplicates. The dataset has 995 tweets in total.

The task of this dataset is event detection. The sentiment analysis labels can be found at https://huggingface.co/datasets/ctoraman/BilTweetNews-Sentiment

All tweets are labeled by 17 annotators. We provide the normalized distribution of annotations across the 7 event classes, as well as the majority class in the last column. There are no cases where multiple classes have the highest score.

Github Repo: https://github.com/BilkentInformationRetrievalGroup/BilTweetNews2017

# If you would like to use any material in this repository, please cite the following papers:
- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.
- Toraman, C. Event-related microblog retrieval in Turkish. Turkish Journal of Electrical Engineering and Computer Sciences, 2021. DOI: 10.3906/elk-2108-167
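# Example usage

A minimal loading sketch with the 🤗 `datasets` library. This assumes the repository hosts the tweets in a `datasets`-compatible file with a single split; the column name `majority_class` is an illustrative placeholder for the majority-label column described above, not a verified field name.

```python
from collections import Counter

from datasets import load_dataset

# Assumption: a single datasets-compatible split named "train".
ds = load_dataset("ctoraman/BilTweetNews-event-detection", split="train")

# "majority_class" is a hypothetical column name standing in for the
# majority label provided in the last column of the data.
class_counts = Counter(ds["majority_class"])
print(class_counts.most_common())  # e.g. [("Other", ...), ("E1", ...), ...]
```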
ctoraman/BilTweetNews-event-detection
[ "task_categories:text-classification", "size_categories:n<1K", "language:tr", "license:cc-by-nc-sa-4.0", "text classification", "event detection", "topic detection", "tweets", "social media", "topic classification", "region:us" ]
2023-11-29T10:53:10+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "size_categories": ["n<1K"], "task_categories": ["text-classification"], "tags": ["text classification", "event detection", "topic detection", "tweets", "social media", "topic classification"]}
2023-11-29T11:09:34+00:00
[]
[ "tr" ]
TAGS #task_categories-text-classification #size_categories-n<1K #language-Turkish #license-cc-by-nc-sa-4.0 #text classification #event detection #topic detection #tweets #social media #topic classification #region-us
# Turkish Event Detection Tweet Dataset: BilTweetNews

The dataset contains tweets related to six major events from Turkish news sources between May 4, 2015 and Jan 8, 2017.

There are 7 event classes:

- E1: May 25, 2015. One of the most popular football clubs in Turkey, Galatasaray, wins the 2015 Turkish Super League.
- E2: Sep 6, 2015. A terrorist group, called PKK, attacked soldiers in Dağlıca, a village in southeastern Turkey.
- E3: Oct 7, 2015. A Turkish scientist, Aziz Sancar, won the 2015 Nobel Prize in Chemistry for his studies on DNA repair.
- E4: May 27, 2016. A local football club from Alanya was promoted to the Turkish Super League for the first time in their history.
- E5: Jun 17, 2016. A traditional anthem that is mostly played by secularists in Turkey, called the 10th Year Anthem, was banned in schools by the director of national education in the Black Sea province of Bolu.
- E6: Oct 17, 2016. A magazine program host mistakenly claimed that Madonna in a Fur Coat, a book written in 1943 by the celebrated Turkish writer Sabahattin Ali, was about pop star Madonna’s life. The book tells the story of a Turkish student and a German singer after World War I.
- Other: Not related to any news topic.

For each event, 100 related-candidate and 60 unrelated-candidate tweets are selected. Lastly, we randomly select 40 tweets that are potentially not related at all; 5 of these are later removed as near-duplicates. The dataset has 995 tweets in total.

The task of this dataset is event detection. The sentiment analysis labels can be found at URL

All tweets are labeled by 17 annotators. We provide the normalized distribution of annotations across the 7 event classes, as well as the majority class in the last column. There are no cases where multiple classes have the highest score.

Github Repo: URL

# If you would like to use any material in this repository, please cite the following papers:
- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.
- Toraman, C. Event-related microblog retrieval in Turkish. Turkish Journal of Electrical Engineering and Computer Sciences, 2021. DOI: 10.3906/elk-2108-167
[ "# Turkish Event Detection Tweet Dataset: BilTweetNews\n\nThe dataset contains tweets related to six major events from Turkish news sources between May 4, 2015\nand Jan 8, 2017. \n\nThere are 7 event classes:\n\n- E1: May 25, 2015 One of the popular football clubs in Turkey, Galatasaray, wins the 2015\nTurkish Super League.\n- E2: Sep 6, 2015 A terrorist group, called PKK, attacked to soldiers in Dağlıca, a village in\nsoutheastern Turkey. \n- E3: Oct 7, 2015 A Turkish scientist, Aziz Sancar, won the 2015 Nobel Chemistry prize with\nhis studies on DNA repair.\n- E4: May 27, 2016 A local football club of Alanya promoted to the Turkish Super League for\nthe first time in their history.\n- E5: Jun 17, 2016 A traditional anthem that is mostly played by secularists in Turkey, called\nthe 10th Year Anthem, was forbidden in schools by the director of national\neducation in the Black Sea province of Bolu. \n- E6: Oct 17, 2016 A magazine programmer confused that Madonna in a Fur Coat, a book written\nin 1943 by a Turkish celebrated writer, Sabahattin Ali, was about popstar\nMadonna’s life. The book tells a story between a Turkish student and German\nsinger after the World War I.\n- Other: Not related to any news topic \n\nFor each event, 100 related-candidate and 60 unrelated-candidate tweets are selected. Lastly, we randomly select 40 tweets that are potentially not related at all, 5 of them are\nremoved due to detecting near-duplicates later. The dataset has 995 tweets in total. \n\nThe task of this dataset is event detection. The sentiment analysis labels can be found at URL\n\nAll tweets are labeled by 17 annotators. We provide the normalized distribution of annotations across 7 event classes. We also provide the majority class at the last column. There are no cases where multiple classes have the highest score.\n\nGithub Repo: URL", "# If you would like to use any material in this repository, please cite the following papers:\n- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.\n\n- Toraman, C. Event-related microblog retrieval in Turkish. Turkish Journal of Electrical Engineering and Computer Sciences. 2021. DOI: 10.3906/elk-2108-167" ]
[ "TAGS\n#task_categories-text-classification #size_categories-n<1K #language-Turkish #license-cc-by-nc-sa-4.0 #text classification #event detection #topic detection #tweets #social media #topic classification #region-us \n", "# Turkish Event Detection Tweet Dataset: BilTweetNews\n\nThe dataset contains tweets related to six major events from Turkish news sources between May 4, 2015\nand Jan 8, 2017. \n\nThere are 7 event classes:\n\n- E1: May 25, 2015 One of the popular football clubs in Turkey, Galatasaray, wins the 2015\nTurkish Super League.\n- E2: Sep 6, 2015 A terrorist group, called PKK, attacked to soldiers in Dağlıca, a village in\nsoutheastern Turkey. \n- E3: Oct 7, 2015 A Turkish scientist, Aziz Sancar, won the 2015 Nobel Chemistry prize with\nhis studies on DNA repair.\n- E4: May 27, 2016 A local football club of Alanya promoted to the Turkish Super League for\nthe first time in their history.\n- E5: Jun 17, 2016 A traditional anthem that is mostly played by secularists in Turkey, called\nthe 10th Year Anthem, was forbidden in schools by the director of national\neducation in the Black Sea province of Bolu. \n- E6: Oct 17, 2016 A magazine programmer confused that Madonna in a Fur Coat, a book written\nin 1943 by a Turkish celebrated writer, Sabahattin Ali, was about popstar\nMadonna’s life. The book tells a story between a Turkish student and German\nsinger after the World War I.\n- Other: Not related to any news topic \n\nFor each event, 100 related-candidate and 60 unrelated-candidate tweets are selected. Lastly, we randomly select 40 tweets that are potentially not related at all, 5 of them are\nremoved due to detecting near-duplicates later. The dataset has 995 tweets in total. \n\nThe task of this dataset is event detection. The sentiment analysis labels can be found at URL\n\nAll tweets are labeled by 17 annotators. We provide the normalized distribution of annotations across 7 event classes. We also provide the majority class at the last column. There are no cases where multiple classes have the highest score.\n\nGithub Repo: URL", "# If you would like to use any material in this repository, please cite the following papers:\n- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.\n\n- Toraman, C. Event-related microblog retrieval in Turkish. Turkish Journal of Electrical Engineering and Computer Sciences. 2021. DOI: 10.3906/elk-2108-167" ]
[ 68, 448, 123 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-n<1K #language-Turkish #license-cc-by-nc-sa-4.0 #text classification #event detection #topic detection #tweets #social media #topic classification #region-us \n" ]
013aa97621ce626c7a93504c09697027d742e318
# ShareGPT4V-ru

## Dataset Description

- **Paper:** https://huggingface.co/papers/2311.12793
- **Repository:** https://github.com/InternLM/InternLM-XComposer/tree/main/projects/ShareGPT4V
- **Homepage:** https://ShareGPT4V.github.io/

This is the **ShareGPT4V (102k)** subset of the [Lin-Chen/ShareGPT4V](https://huggingface.co/datasets/Lin-Chen/ShareGPT4V) dataset translated into Russian.
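## Example usage

A minimal loading sketch with the 🤗 `datasets` library; the field names follow the features declared in this card's metadata (`image`, `id`, `conversations`) and are not otherwise verified here.

```python
from datasets import load_dataset

# The dataset metadata declares a single "train" split.
ds = load_dataset("d0rj/ShareGPT4V-ru", split="train")

sample = ds[0]
print(sample["id"])             # record identifier
print(sample["image"])          # image path/reference stored as a string
print(sample["conversations"])  # translated multi-turn conversation
```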
d0rj/ShareGPT4V-ru
[ "task_categories:visual-question-answering", "task_categories:question-answering", "task_categories:conversational", "language_creators:translated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:Lin-Chen/ShareGPT4V", "language:ru", "license:cc-by-nc-4.0", "chat", "visual-chat", "multimodal-chat", "arxiv:2311.12793", "region:us" ]
2023-11-29T11:04:35+00:00
{"language_creators": ["translated"], "language": ["ru"], "license": "cc-by-nc-4.0", "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["Lin-Chen/ShareGPT4V"], "task_categories": ["visual-question-answering", "question-answering", "conversational"], "paperswithcode_id": "sharegpt4v", "pretty_name": "ShareGPT4V (ru)", "dataset_info": {"features": [{"name": "image", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "conversations", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 192931273, "num_examples": 102025}], "download_size": 82097525, "dataset_size": 192931273}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["chat", "visual-chat", "multimodal-chat"]}
2023-11-30T08:56:12+00:00
[ "2311.12793" ]
[ "ru" ]
TAGS #task_categories-visual-question-answering #task_categories-question-answering #task_categories-conversational #language_creators-translated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-Lin-Chen/ShareGPT4V #language-Russian #license-cc-by-nc-4.0 #chat #visual-chat #multimodal-chat #arxiv-2311.12793 #region-us
# ShareGPT4V-ru ## Dataset Description - Paper: URL - Repository: URL - Homepage: URL This is the ShareGPT4V (102k) subset of the Lin-Chen/ShareGPT4V dataset translated into Russian.
[ "# ShareGPT4V-ru", "## Dataset Description\n\n- Paper: URL\n- Repository URL\n- Homepage URL\n\nThis is translated version ShareGPT4V(102k) subset of Lin-Chen/ShareGPT4V dataset into Russian." ]
[ "TAGS\n#task_categories-visual-question-answering #task_categories-question-answering #task_categories-conversational #language_creators-translated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-Lin-Chen/ShareGPT4V #language-Russian #license-cc-by-nc-4.0 #chat #visual-chat #multimodal-chat #arxiv-2311.12793 #region-us \n", "# ShareGPT4V-ru", "## Dataset Description\n\n- Paper: URL\n- Repository URL\n- Homepage URL\n\nThis is translated version ShareGPT4V(102k) subset of Lin-Chen/ShareGPT4V dataset into Russian." ]
[ 129, 8, 49 ]
[ "passage: TAGS\n#task_categories-visual-question-answering #task_categories-question-answering #task_categories-conversational #language_creators-translated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-Lin-Chen/ShareGPT4V #language-Russian #license-cc-by-nc-4.0 #chat #visual-chat #multimodal-chat #arxiv-2311.12793 #region-us \n# ShareGPT4V-ru## Dataset Description\n\n- Paper: URL\n- Repository URL\n- Homepage URL\n\nThis is translated version ShareGPT4V(102k) subset of Lin-Chen/ShareGPT4V dataset into Russian." ]
30ab97c829cca0bf0dad2d9b10cd34a99171ac37
# Dataset Card for oasst_response_comparison This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("nataliaElv/oasst_response_comparison") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("nataliaElv/oasst_response_comparison") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**. The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | prompt | Prompt | text | True | True | | reply_1 | Reply 1 | text | True | True | | reply_2 | Reply 2 | text | True | True | | reply_3 | Reply 3 | text | True | True | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | reply-ranking | Order replies based on your preference | ranking | True | Order the replies from 1 to 3 based on your preference. 
| ['reply-1', 'reply-2', 'reply-3'] | The **suggestions** are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. | Metadata Name | Title | Type | Values | Visible for Annotators | | ------------- | ----- | ---- | ------ | ---------------------- | The **guidelines**, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": null, "fields": { "prompt": "Can you write a short introduction about the relevance of the term \"monopsony\" in economics? Please use examples related to potential monopsonies in the labour market and cite relevant research.", "reply_1": "\"Monopsony\" refers to a market structure where there is only one buyer for a particular good or service. In economics, this term is particularly relevant in the labor market, where a monopsony employer has significant power over the wages and working conditions of their employees. The presence of a monopsony can result in lower wages and reduced employment opportunities for workers, as the employer has little incentive to increase wages or provide better working conditions.\n\nRecent research has identified potential monopsonies in industries such as retail and fast food, where a few large companies control a significant portion of the market (Bivens \u0026 Mishel, 2013). In these industries, workers often face low wages, limited benefits, and reduced bargaining power, leading to a situation where they are dependent on the employer for their livelihood. This dependence can result in further suppression of wages and a decline in working conditions.\n\nOverall, the concept of monopsony is essential to understanding the dynamics of labor markets and the impact of market power on workers. Further research is needed to understand the extent and impact of monopsonies on the economy and to develop policies to address this issue.\n\nReferences:\nBivens, J., \u0026 Mishel, L. (2013). The Pay of Corporate Executives and Financial Professionals as Evidence of Rents in Top 1 Percent Incomes. Journal of Economic Perspectives, 27(3), 57-78.", "reply_2": "Monopsony is a market structure in which there is a single buyer in a market. 
In the context of labor markets, a monopsony occurs when there is only one employer in a particular industry or geographic area, giving that employer significant power over the wages and working conditions of workers. This results in a situation where the employer has the ability to suppress wages and reduce the bargaining power of workers.\n\nResearch has shown that the existence of monopsony power in labor markets can have negative effects on workers, such as lower wages, reduced benefits, and limited job opportunities. For example, a study by Autor, Dorn, and Hanson (2013) found evidence of monopsony power in certain U.S. labor markets, resulting in lower wages and reduced job opportunities for workers. Similarly, another study by Bharadwaj, Black, and Kolesnikova (2019) found that the presence of a single dominant employer in a local labor market was associated with lower wages for workers.", "reply_3": "Monopsony refers to a market structure where there is only one buyer of a good or service. In the context of labor markets, a monopsony occurs when there is only one employer in a particular geographic area or industry, giving them significant power to set wages and working conditions. Research has shown that the existence of monopsonies can lead to lower wages and reduced job opportunities for workers. For example, a study by the National Bureau of Economic Research found that in industries with high levels of concentration, workers earn lower wages and are less likely to receive benefits such as health insurance." }, "metadata": {}, "responses": [], "suggestions": [], "vectors": {} } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "external_id": null, "metadata": "{}", "prompt": "Can you write a short introduction about the relevance of the term \"monopsony\" in economics? Please use examples related to potential monopsonies in the labour market and cite relevant research.", "reply-ranking": [], "reply-ranking-suggestion": null, "reply-ranking-suggestion-metadata": { "agent": null, "score": null, "type": null }, "reply_1": "\"Monopsony\" refers to a market structure where there is only one buyer for a particular good or service. In economics, this term is particularly relevant in the labor market, where a monopsony employer has significant power over the wages and working conditions of their employees. The presence of a monopsony can result in lower wages and reduced employment opportunities for workers, as the employer has little incentive to increase wages or provide better working conditions.\n\nRecent research has identified potential monopsonies in industries such as retail and fast food, where a few large companies control a significant portion of the market (Bivens \u0026 Mishel, 2013). In these industries, workers often face low wages, limited benefits, and reduced bargaining power, leading to a situation where they are dependent on the employer for their livelihood. This dependence can result in further suppression of wages and a decline in working conditions.\n\nOverall, the concept of monopsony is essential to understanding the dynamics of labor markets and the impact of market power on workers. Further research is needed to understand the extent and impact of monopsonies on the economy and to develop policies to address this issue.\n\nReferences:\nBivens, J., \u0026 Mishel, L. (2013). The Pay of Corporate Executives and Financial Professionals as Evidence of Rents in Top 1 Percent Incomes. 
Journal of Economic Perspectives, 27(3), 57-78.", "reply_2": "Monopsony is a market structure in which there is a single buyer in a market. In the context of labor markets, a monopsony occurs when there is only one employer in a particular industry or geographic area, giving that employer significant power over the wages and working conditions of workers. This results in a situation where the employer has the ability to suppress wages and reduce the bargaining power of workers.\n\nResearch has shown that the existence of monopsony power in labor markets can have negative effects on workers, such as lower wages, reduced benefits, and limited job opportunities. For example, a study by Autor, Dorn, and Hanson (2013) found evidence of monopsony power in certain U.S. labor markets, resulting in lower wages and reduced job opportunities for workers. Similarly, another study by Bharadwaj, Black, and Kolesnikova (2019) found that the presence of a single dominant employer in a local labor market was associated with lower wages for workers.", "reply_3": "Monopsony refers to a market structure where there is only one buyer of a good or service. In the context of labor markets, a monopsony occurs when there is only one employer in a particular geographic area or industry, giving them significant power to set wages and working conditions. Research has shown that the existence of monopsonies can lead to lower wages and reduced job opportunities for workers. For example, a study by the National Bureau of Economic Research found that in industries with high levels of concentration, workers earn lower wages and are less likely to receive benefits such as health insurance." } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **prompt** is of type `text`. * **reply_1** is of type `text`. * **reply_2** is of type `text`. * **reply_3** is of type `text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **reply-ranking** is of type `ranking` with the following allowed values ['reply-1', 'reply-2', 'reply-3'], and description "Order the replies from 1 to 3 based on your preference.". * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. * (optional) **reply-ranking-suggestion** is of type `ranking` with the following allowed values ['reply-1', 'reply-2', 'reply-3']. Additionally, we also have two more fields that are optional and are the following: * **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. 
The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines For each prompt, order the replies in a ranking based on how clear and helpful you find each reply. Ties are allowed. If you prefer not to give an answer, click Discard and move on to the next record. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
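As a complement to the loading snippets above, here is a sketch of reading submitted answers to the `reply-ranking` question; it assumes the Argilla 1.x Python client, and the exact response-object attributes used (`responses`, `values`, `.rank`, `.value`) are assumptions tied to that client version rather than verified against this dataset.

```python
import argilla as rg

ds = rg.FeedbackDataset.from_huggingface("nataliaElv/oasst_response_comparison")

for record in ds.records:
    prompt = record.fields["prompt"]
    for response in record.responses:
        # For a RankingQuestion, the submitted value is a list of
        # (value, rank) entries, one per reply being ordered.
        ranking = response.values["reply-ranking"].value
        ordered = sorted(ranking, key=lambda item: item.rank)
        print(prompt[:60], [item.value for item in ordered])
```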
nataliaElv/oasst_response_comparison
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-29T11:24:31+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2023-11-29T11:24:39+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for oasst\_response\_comparison ============================================ This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + prompt is of type 'text'. + reply\_1 is of type 'text'. + reply\_2 is of type 'text'. + reply\_3 is of type 'text'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + reply-ranking is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3'], and description "Order the replies from 1 to 3 based on your preference.". * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) reply-ranking-suggestion is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3']. Additionally, we also have two more fields that are optional and are the following: * metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines For each prompt, order the replies in a ranking based on how clear and helpful you find each reply. Ties are allowed. If you prefer not to give an answer, click Discard and move on to the next record. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. 
These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'text'.\n\t+ reply\\_1 is of type 'text'.\n\t+ reply\\_2 is of type 'text'.\n\t+ reply\\_3 is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ reply-ranking is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3'], and description \"Order the replies from 1 to 3 based on your preference.\".\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) reply-ranking-suggestion is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nFor each promt, order the replies in a ranking based on how clear and helpful you find each reply. Ties are allowed. If you prefer not to give an answer, click Discard and move on to the next record.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'text'.\n\t+ reply\\_1 is of type 'text'.\n\t+ reply\\_2 is of type 'text'.\n\t+ reply\\_3 is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ reply-ranking is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3'], and description \"Order the replies from 1 to 3 based on your preference.\".\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) reply-ranking-suggestion is of type 'ranking' with the following allowed values ['reply-1', 'reply-2', 'reply-3'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nFor each promt, order the replies in a ranking based on how clear and helpful you find each reply. Ties are allowed. If you prefer not to give an answer, click Discard and move on to the next record.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 404, 40, 547, 27, 7, 4, 10, 10, 5, 52, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
4d28fddd820f9c5f9ecf6a2eca70026b2089dce7
# MatSynth

MatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.
This dataset consists of over 4,000 ultra-high resolution materials, offering unparalleled scale, diversity, and detail. 

Meticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.

## Dataset Details

### Dataset Description

MatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials, 
all released under permissive licensing.

All materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*), 
modelling both the reflectance and mesostructure of the material.

Each material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method, 
and, when available, descriptions and physical size. 
This comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.

## Dataset Structure
 
The MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials. 
To enhance accessibility and ease of navigation, each split is further organized into separate folders for each distinct category present in the dataset (Blends, Ceramic, Concrete, Fabric, Ground, Leather, Marble, Metal, Misc, Plastic, Plaster, Stone, Terracotta, Wood).

## Dataset Creation

The MatSynth dataset is designed to support modern, learning-based techniques for a variety of material-related tasks including, 
but not limited to, material acquisition, material generation, and synthetic data generation, e.g., for retrieval or segmentation.

### Source Data

The MatSynth dataset is the result of an extensive collection of data from multiple online sources operating under the CC0 and CC-BY licensing framework. 
This collection strategy allows us to capture a broad spectrum of materials, 
from commonly used ones to more niche or specialized variants, while guaranteeing that the data can be used for a variety of use cases. 

Materials under CC0 license were collected from [AmbientCG](https://ambientcg.com/), [CGBookCase](https://www.cgbookcase.com/), [PolyHeaven](https://polyhaven.com/), [ShareTextures](https://www.sharetextures.com/), and [TextureCan](https://www.texturecan.com/).
The dataset also includes a limited set of materials from the artist [Julio Sillet](https://juliosillet.gumroad.com/), distributed under CC-BY license.

We collected over 6000 materials, which we meticulously filter to keep only tileable, 4K materials. 
This high resolution allows us to extract many different crops from each sample at different scales for augmentation. 
Additionally, we discard blurry or low-quality materials (by visual inspection). 
The resulting dataset consists of 3736 unique materials, which we augment by blending semantically compatible materials (e.g., snow over ground). 
In total, our dataset contains 4069 unique 4K materials.

### Annotations

The dataset is composed of material maps (Basecolor, Diffuse, Normal, Height, Roughness, Metallic, Specular and, when useful, Opacity) 
and associated renderings under varying environmental illuminations, and multi-scale crops.
We adopt the OpenGL standard for the Normal map (Y-axis pointing upward). 
The Height map is given in a 16-bit single-channel format for higher precision.

In addition to these maps, the dataset includes other annotations providing context to each material: 
the capture method (photogrammetry, procedural generation, or approximation); 
a list of descriptive tags; source name (website); source link; 
licensing; and a timestamp for eventual future versioning. 
For a subset of materials, when the information is available, we also provide the author name (387), a text description (572), and the physical size, 
presented as the length of the edge in centimeters (358).

## Citation

```
@article{vecchio2024matsynth,
  title={MatSynth: A Modern PBR Materials Dataset},
  author={Vecchio, Giuseppe and Deschaintre, Valentin},
  journal={arXiv preprint arXiv:2401.06056},
  year={2024}
}
```
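## Usage Sketch

As a usage illustration, the following is a minimal loading sketch. It assumes the dataset is served through the standard 🤗 `datasets` interface under the repo id `gvecchio/MatSynth`, that streaming is supported, and that the maps are exposed as per-record image features under lowercase names such as `basecolor`, `normal`, and `height` — none of these names are confirmed by this card, so treat them as placeholders to check against the actual schema.

```python
import numpy as np
from datasets import load_dataset

# Stream the train split so the 4K maps are fetched lazily instead of
# downloading the whole dataset up front (repo id and split name assumed).
ds = load_dataset("gvecchio/MatSynth", split="train", streaming=True)

# Take the first material; each record is assumed to carry PIL images
# for the maps plus a metadata dict. Field names below are placeholders.
material = next(iter(ds))

# The card states the Height map is 16-bit single-channel; normalize it
# to [0, 1] float for training or rendering.
height = np.asarray(material["height"], dtype=np.float32) / 65535.0

# The card adopts the OpenGL normal convention (Y-axis up). A DirectX-style
# pipeline would flip the green channel:
normal = np.asarray(material["normal"], dtype=np.float32) / 255.0
normal_dx = normal.copy()
normal_dx[..., 1] = 1.0 - normal_dx[..., 1]
```

Streaming is only a convenience choice here; a plain `load_dataset(...)` call would materialize the full split on disk first.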
gvecchio/MatSynth
[ "task_categories:image-to-image", "task_categories:unconditional-image-generation", "task_categories:image-classification", "task_categories:text-to-image", "language:en", "materials", "pbr", "4d", "graphics", "rendering", "svbrdf", "region:us" ]
2023-11-29T11:24:57+00:00
{"language": ["en"], "task_categories": ["image-to-image", "unconditional-image-generation", "image-classification", "text-to-image"], "pretty_name": "MatSynth", "tags": ["materials", "pbr", "4d", "graphics", "rendering", "svbrdf"]}
2024-01-16T22:26:33+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #task_categories-unconditional-image-generation #task_categories-image-classification #task_categories-text-to-image #language-English #materials #pbr #4d #graphics #rendering #svbrdf #region-us
# MatSynth

MatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.
This dataset consists of over 4,000 ultra-high resolution materials, offering unparalleled scale, diversity, and detail. 

Meticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.

## Dataset Details

### Dataset Description

MatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials, 
all released under permissive licensing.

All materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*), 
modelling both the reflectance and mesostructure of the material.

Each material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method, 
and, when available, descriptions and physical size. 
This comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.

## Dataset Structure
 
The MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials. 
To enhance accessibility and ease of navigation, each split is further organized into separate folders for each distinct category present in the dataset (Blends, Ceramic, Concrete, Fabric, Ground, Leather, Marble, Metal, Misc, Plastic, Plaster, Stone, Terracotta, Wood).

## Dataset Creation

The MatSynth dataset is designed to support modern, learning-based techniques for a variety of material-related tasks including, 
but not limited to, material acquisition, material generation, and synthetic data generation, e.g., for retrieval or segmentation.

### Source Data

The MatSynth dataset is the result of an extensive collection of data from multiple online sources operating under the CC0 and CC-BY licensing framework. 
This collection strategy allows us to capture a broad spectrum of materials, 
from commonly used ones to more niche or specialized variants, while guaranteeing that the data can be used for a variety of use cases. 

Materials under CC0 license were collected from AmbientCG, CGBookCase, PolyHeaven, ShareTextures, and TextureCan.
The dataset also includes a limited set of materials from the artist Julio Sillet, distributed under CC-BY license.

We collected over 6000 materials, which we meticulously filter to keep only tileable, 4K materials. 
This high resolution allows us to extract many different crops from each sample at different scales for augmentation. 
Additionally, we discard blurry or low-quality materials (by visual inspection). 
The resulting dataset consists of 3736 unique materials, which we augment by blending semantically compatible materials (e.g., snow over ground). 
In total, our dataset contains 4069 unique 4K materials.

### Annotations

The dataset is composed of material maps (Basecolor, Diffuse, Normal, Height, Roughness, Metallic, Specular and, when useful, Opacity) 
and associated renderings under varying environmental illuminations, and multi-scale crops.
We adopt the OpenGL standard for the Normal map (Y-axis pointing upward). 
The Height map is given in a 16-bit single-channel format for higher precision. 

In addition to these maps, the dataset includes other annotations providing context to each material: 
the capture method (photogrammetry, procedural generation, or approximation); 
a list of descriptive tags; source name (website); source link; 
licensing; and a timestamp for eventual future versioning. 
For a subset of materials, when the information is available, we also provide the author name (387), a text description (572), and the physical size, 
presented as the length of the edge in centimeters (358).
[ "# MatSynth\n\nMatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.\nThis dataset consists of over 4,000 ultra-high resolution, offering unparalleled scale, diversity, and detail. \n\nMeticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.", "## Dataset Details", "### Dataset Description\n\nMatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials, \nall released under permissive licensing.\n\nAll materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*), \nmodelling both the reflectance and mesostructure of the material.\n\nEach material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method, \nand, when available, descriptions and physical size. \nThis comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.", "## Dataset Structure\n \nThe MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials. \nTo enhance accessibility and ease of navigation, each split is further organized into separate folders for each distinct category present in the dataset (Blends, Ceramic, Concrete, Fabric, Ground, Leather, Marble, Metal, Misc, Plastic, Plaster, Stone, Terracotta, Wood).", "## Dataset Creation\n\nThe MatSynth dataset is designed to support modern, learning-based techniques for a variety of material-related tasks including, \nbut not limited to, material acquisition, material generation and synthetic data generation e.g. for retrieval or segmentation.", "### Source Data\n\nThe MatSynth dataset is the result of an extensively collection of data from multiple online sources operating under the CC0 and CC-BY licensing framework. \nThis collection strategy allows to capture a broad spectrum of materials, \nfrom commonly used ones to more niche or specialized variants while guaranteeing that the data can be used for a variety of usecases. \n\nMaterials under CC0 license were collected from AmbientCG, CGBookCase, PolyHeaven, \nShateTexture, and TextureCan.\nThe dataset also includes limited set of materials from the artist Julio Sillet, distributed under CC-BY license.\n\nWe collected over 6000 materials which we meticulously filter to keep only tileable, 4K materials. \nThis high resolution allows us to extract many different crops from each sample at different scale for augmentation. \nAdditionally, we discard blurry or low-quality materials (by visual inspection). \nThe resulting dataset consists of 3736 unique materials which we augment by blending semantically compatible materials (e.g.: snow over ground). \nIn total, our dataset contains 4069 unique 4K materials.", "### Annotations\n\nThe dataset is composed of material maps (Basecolor, Diffuse, Normal, Height, Roughness, Metallic, Specular and, when useful, opacity) \nand associated renderings under varying environmental illuminations, and multi-scale crops.\nWe adopt the OpenGL standard for the Normal map (Y-axis pointing upward). 
\nThe Height map is given in a 16-bit single channel format for higher precision.\n\nIn addition to these maps, the dataset includes other annotations providing context to each material: \nthe capture method (photogrammetry, procedural generation, or approximation); \nlist of descriptive tags; source name (website); source link; \nlicensing and a timestamps for eventual future versioning. \nFor a subset of materials, when the information is available, we also provide the author name (387), text description (572) and a physical size, \npresented as the length of the edge in centimeters (358)." ]
[ "TAGS\n#task_categories-image-to-image #task_categories-unconditional-image-generation #task_categories-image-classification #task_categories-text-to-image #language-English #materials #pbr #4d #graphics #rendering #svbrdf #region-us \n", "# MatSynth\n\nMatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.\nThis dataset consists of over 4,000 ultra-high resolution, offering unparalleled scale, diversity, and detail. \n\nMeticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.", "## Dataset Details", "### Dataset Description\n\nMatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials, \nall released under permissive licensing.\n\nAll materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*), \nmodelling both the reflectance and mesostructure of the material.\n\nEach material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method, \nand, when available, descriptions and physical size. \nThis comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.", "## Dataset Structure\n \nThe MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials. \nTo enhance accessibility and ease of navigation, each split is further organized into separate folders for each distinct category present in the dataset (Blends, Ceramic, Concrete, Fabric, Ground, Leather, Marble, Metal, Misc, Plastic, Plaster, Stone, Terracotta, Wood).", "## Dataset Creation\n\nThe MatSynth dataset is designed to support modern, learning-based techniques for a variety of material-related tasks including, \nbut not limited to, material acquisition, material generation and synthetic data generation e.g. for retrieval or segmentation.", "### Source Data\n\nThe MatSynth dataset is the result of an extensively collection of data from multiple online sources operating under the CC0 and CC-BY licensing framework. \nThis collection strategy allows to capture a broad spectrum of materials, \nfrom commonly used ones to more niche or specialized variants while guaranteeing that the data can be used for a variety of usecases. \n\nMaterials under CC0 license were collected from AmbientCG, CGBookCase, PolyHeaven, \nShateTexture, and TextureCan.\nThe dataset also includes limited set of materials from the artist Julio Sillet, distributed under CC-BY license.\n\nWe collected over 6000 materials which we meticulously filter to keep only tileable, 4K materials. \nThis high resolution allows us to extract many different crops from each sample at different scale for augmentation. \nAdditionally, we discard blurry or low-quality materials (by visual inspection). \nThe resulting dataset consists of 3736 unique materials which we augment by blending semantically compatible materials (e.g.: snow over ground). 
\nIn total, our dataset contains 4069 unique 4K materials.", "### Annotations\n\nThe dataset is composed of material maps (Basecolor, Diffuse, Normal, Height, Roughness, Metallic, Specular and, when useful, opacity) \nand associated renderings under varying environmental illuminations, and multi-scale crops.\nWe adopt the OpenGL standard for the Normal map (Y-axis pointing upward). \nThe Height map is given in a 16-bit single channel format for higher precision.\n\nIn addition to these maps, the dataset includes other annotations providing context to each material: \nthe capture method (photogrammetry, procedural generation, or approximation); \nlist of descriptive tags; source name (website); source link; \nlicensing and a timestamps for eventual future versioning. \nFor a subset of materials, when the information is available, we also provide the author name (387), text description (572) and a physical size, \npresented as the length of the edge in centimeters (358)." ]
[ 80, 110, 4, 189, 114, 62, 256, 217 ]
[ "passage: TAGS\n#task_categories-image-to-image #task_categories-unconditional-image-generation #task_categories-image-classification #task_categories-text-to-image #language-English #materials #pbr #4d #graphics #rendering #svbrdf #region-us \n# MatSynth\n\nMatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.\nThis dataset consists of over 4,000 ultra-high resolution, offering unparalleled scale, diversity, and detail. \n\nMeticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.## Dataset Details### Dataset Description\n\nMatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials, \nall released under permissive licensing.\n\nAll materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*), \nmodelling both the reflectance and mesostructure of the material.\n\nEach material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method, \nand, when available, descriptions and physical size. \nThis comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.## Dataset Structure\n \nThe MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials. \nTo enhance accessibility and ease of navigation, each split is further organized into separate folders for each distinct category present in the dataset (Blends, Ceramic, Concrete, Fabric, Ground, Leather, Marble, Metal, Misc, Plastic, Plaster, Stone, Terracotta, Wood)." ]
d4e4c8f4799e80ac4c85837c0652a17ef1b85213
# Dataset Card for "dpo_anthropic_hh_rlhf" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Palash123/dpo_anthropic_hh_rlhf
[ "region:us" ]
2023-11-29T11:27:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 186630889, "num_examples": 159280}, {"name": "test", "num_bytes": 9980924, "num_examples": 8467}], "download_size": 0, "dataset_size": 196611813}}
2023-12-04T07:17:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dpo_anthropic_hh_rlhf" More Information needed
[ "# Dataset Card for \"dpo_anthropic_hh_rlhf\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dpo_anthropic_hh_rlhf\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dpo_anthropic_hh_rlhf\"\n\nMore Information needed" ]
1e3aefaaa8926e66cfd6675513402f5c3b2b753f
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
AkilaRajini/MES-Dataset-1
[ "region:us" ]
2023-11-29T11:31:35+00:00
{}
2023-11-29T11:32:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
2d4cc05fd0c0932e75ad1b58ff5942f17c3a34a3
# Dataset Card for "filtered_big_literalist_ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/filtered_big_literalist_ds
[ "region:us" ]
2023-11-29T11:33:58+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 953191, "num_examples": 764}], "download_size": 483558, "dataset_size": 953191}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T11:34:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "filtered_big_literalist_ds" More Information needed
[ "# Dataset Card for \"filtered_big_literalist_ds\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"filtered_big_literalist_ds\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"filtered_big_literalist_ds\"\n\nMore Information needed" ]
efc364239b42d71c137846651cd96e92cbb7996b
(This dataset contains raw text, which is unlabeled.)

5,844 Turkish news articles obtained from the Milliyet newspaper between 9 September 2009 and 31 October 2009.

GitHub Repo: https://github.com/BilkentInformationRetrievalGroup/BilFront2009

# If you would like to use any material in this repository, please cite the following paper:
Toraman, C., & Can, F. (2015). A front-page news-selection algorithm based on topic modelling using raw text. Journal of Information Science, 41(5), 676-685.
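Since the card itself does not show a loading snippet, here is a minimal, hedged sketch. It assumes the articles are exposed through the standard 🤗 `datasets` loader with a `train` split and a `text` column — both assumptions, not details confirmed above.

```python
from datasets import load_dataset

# Repo id taken from this card; split and column names are assumptions.
articles = load_dataset("ctoraman/front-page-news", split="train")

# Peek at the beginning of the first (unlabeled) article.
print(articles[0]["text"][:200])
```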
ctoraman/front-page-news
[ "task_categories:text-generation", "task_categories:text-classification", "language:tr", "license:cc-by-nc-sa-4.0", "front-page-news", "front-page", "news", "region:us" ]
2023-11-29T11:36:47+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "task_categories": ["text-generation", "text-classification"], "tags": ["front-page-news", "front-page", "news"]}
2023-11-29T12:17:29+00:00
[]
[ "tr" ]
TAGS #task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #front-page-news #front-page #news #region-us
(This dataset contains raw text, which is unlabeled.)

5,844 Turkish news articles obtained from the Milliyet newspaper between 9 September 2009 and 31 October 2009.

GitHub Repo: URL

# If you would like to use any material in this repository, please cite the following paper:
Toraman, C., & Can, F. (2015). A front-page news-selection algorithm based on topic modelling using raw text. Journal of Information Science, 41(5), 676-685.
[ "# If you would like to use any material in this repository, please cite the following paper:\nToraman, C., & Can, F. (2015). A front-page news-selection algorithm based on topic modelling using raw text. Journal of Information Science, 41(5), 676-685." ]
[ "TAGS\n#task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #front-page-news #front-page #news #region-us \n", "# If you would like to use any material in this repository, please cite the following paper:\nToraman, C., & Can, F. (2015). A front-page news-selection algorithm based on topic modelling using raw text. Journal of Information Science, 41(5), 676-685." ]
[ 59, 65 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #front-page-news #front-page #news #region-us \n# If you would like to use any material in this repository, please cite the following paper:\nToraman, C., & Can, F. (2015). A front-page news-selection algorithm based on topic modelling using raw text. Journal of Information Science, 41(5), 676-685." ]
38eacd4a6cf4e19b23f4849d9a111da945545450
# Public Protest Event Detection in Turkish

80 news events/articles that occurred between 2015 and 2017. Each event is listed with its origin date, place, news URL, public-reaction category, and reaction tags.

Protest labels are determined in terms of dimensions and directions.

Dimensions are national, local, and social media. National categories represent public reactions that occurred in at least two different cities. Local categories cover events that occurred at only a specific place. Social categories represent reactions where people share opinions only on social media, such as microblogs. 

Directions are either negative or positive. 

Overall, there are 7 classes: 

- national positive
- national negative
- local positive
- local negative
- social positive
- social negative
- no reaction

GitHub Repo: https://github.com/BilkentInformationRetrievalGroup/BilPredict2017

# If you would like to use any material in this repository, please cite the following paper:
- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.
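As a small illustration of how the seven categories above might be wired into a classification experiment, the integer encoding below is an arbitrary assumption for demonstration, not part of the dataset:

```python
# The seven public-reaction classes exactly as listed on this card;
# the id assignment itself is a hypothetical choice.
LABELS = [
    "national positive",
    "national negative",
    "local positive",
    "local negative",
    "social positive",
    "social negative",
    "no reaction",
]
label2id = {name: i for i, name in enumerate(LABELS)}
id2label = {i: name for name, i in label2id.items()}
```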
ctoraman/protest-event-prediction
[ "task_categories:text-classification", "language:tr", "license:cc-by-nc-sa-4.0", "protest event detection", "protest detection", "public reaction", "region:us" ]
2023-11-29T11:52:02+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "task_categories": ["text-classification"], "tags": ["protest event detection", "protest detection", "public reaction"]}
2023-11-29T12:02:47+00:00
[]
[ "tr" ]
TAGS #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #protest event detection #protest detection #public reaction #region-us
# Public Protest Event Detection in Turkish

80 news events/articles that occurred between 2015 and 2017. Each event is listed with its origin date, place, news URL, public-reaction category, and reaction tags.

Protest labels are determined in terms of dimensions and directions.

Dimensions are national, local, and social media. National categories represent public reactions that occurred in at least two different cities. Local categories cover events that occurred at only a specific place. Social categories represent reactions where people share opinions only on social media, such as microblogs. 

Directions are either negative or positive. 

Overall, there are 7 classes: 

- national positive
- national negative
- local positive
- local negative
- social positive
- social negative
- no reaction

GitHub Repo: URL

# If you would like to use any material in this repository, please cite the following paper:
- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017.
[ "# Public Protest Event Detection in Turkish\n\n80 news events/articles occurred between 2015 and 017. Each event is listed with its origin date, place, news url, public-reaction category, and reaction tags.\n\nProtest labels are determined in terms of dimensions and directions.\n\nDimensions are in terms of national, local, and social media. National categories represent public reactions occurred in at least two different cities. Local categories have events occurred at only a specific place. Social categories\nrepresent reactions that people share opinions only in social media, such as microblogs. \n\nDirections are either negative or positive. \n\nOverall, there are 7 classes: \n\n- national positive\n- national negative\n- local positive\n- local negative\n- social positive\n- social negative\n- no reaction\n\nGitHub Repo: URL", "# If you would like to use any material in this repository, please cite the following paper:\n- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017." ]
[ "TAGS\n#task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #protest event detection #protest detection #public reaction #region-us \n", "# Public Protest Event Detection in Turkish\n\n80 news events/articles occurred between 2015 and 017. Each event is listed with its origin date, place, news url, public-reaction category, and reaction tags.\n\nProtest labels are determined in terms of dimensions and directions.\n\nDimensions are in terms of national, local, and social media. National categories represent public reactions occurred in at least two different cities. Local categories have events occurred at only a specific place. Social categories\nrepresent reactions that people share opinions only in social media, such as microblogs. \n\nDirections are either negative or positive. \n\nOverall, there are 7 classes: \n\n- national positive\n- national negative\n- local positive\n- local negative\n- social positive\n- social negative\n- no reaction\n\nGitHub Repo: URL", "# If you would like to use any material in this repository, please cite the following paper:\n- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017." ]
[ 50, 170, 77 ]
[ "passage: TAGS\n#task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #protest event detection #protest detection #public reaction #region-us \n# Public Protest Event Detection in Turkish\n\n80 news events/articles occurred between 2015 and 017. Each event is listed with its origin date, place, news url, public-reaction category, and reaction tags.\n\nProtest labels are determined in terms of dimensions and directions.\n\nDimensions are in terms of national, local, and social media. National categories represent public reactions occurred in at least two different cities. Local categories have events occurred at only a specific place. Social categories\nrepresent reactions that people share opinions only in social media, such as microblogs. \n\nDirections are either negative or positive. \n\nOverall, there are 7 classes: \n\n- national positive\n- national negative\n- local positive\n- local negative\n- social positive\n- social negative\n- no reaction\n\nGitHub Repo: URL# If you would like to use any material in this repository, please cite the following paper:\n- Toraman, C. Early Prediction of Public Reactions to News Events Using Microblogs. Seventh BCS-IRSG Symposium on Future Directions in Information Access (FDIA 2017), Barcelona, Spain, 5 September 2017." ]
e4b4653d68cde5ba90badc2a5558b8ecd47ca125
(This dataset contains raw text, which is unlabeled.)

1,656 Turkish news articles from Sözcü Newspaper (http://www.sozcu.com.tr) between December 20, 2013, and March 11, 2014.

GitHub Repo: https://github.com/BilkentInformationRetrievalGroup/TUBITAK113E249/

# If you would like to use any material in this repository, please cite this paper:
Toraman, C. and Can, F. (2017), Discovering story chains: A framework based on zigzagged search and news actors. Journal of the Association for Information Science and Technology, 68: 2795-2808. https://doi.org/10.1002/asi.23885
ctoraman/sozcu-news-2014
[ "task_categories:text-generation", "task_categories:text-classification", "language:tr", "license:cc-by-nc-sa-4.0", "news", "corpus", "news articles", "region:us" ]
2023-11-29T12:06:32+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "task_categories": ["text-generation", "text-classification"], "tags": ["news", "corpus", "news articles"]}
2023-11-29T12:17:44+00:00
[]
[ "tr" ]
TAGS #task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #news #corpus #news articles #region-us
(This dataset contains raw text, which is unlabeled.)

1,656 Turkish news articles from Sözcü Newspaper (URL) between December 20, 2013, and March 11, 2014.

GitHub Repo: URL

# If you would like to use any material in this repository, please cite this paper:
Toraman, C. and Can, F. (2017), Discovering story chains: A framework based on zigzagged search and news actors. Journal of the Association for Information Science and Technology, 68: 2795-2808. URL
[ "# If you would like to use any material in this repository, please cite this paper:\nToraman, C. and Can, F. (2017), Discovering story chains: A framework based on zigzagged search and news actors. Journal of the Association for Information Science and Technology, 68: 2795-2808. URL" ]
[ "TAGS\n#task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #news #corpus #news articles #region-us \n", "# If you would like to use any material in this repository, please cite this paper:\nToraman, C. and Can, F. (2017), Discovering story chains: A framework based on zigzagged search and news actors. Journal of the Association for Information Science and Technology, 68: 2795-2808. URL" ]
[ 55, 70 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #news #corpus #news articles #region-us \n# If you would like to use any material in this repository, please cite this paper:\nToraman, C. and Can, F. (2017), Discovering story chains: A framework based on zigzagged search and news actors. Journal of the Association for Information Science and Technology, 68: 2795-2808. URL" ]
49bfb470a921a09a2222945b19dc34c77b973974
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
hoangquang27/validation-data
[ "region:us" ]
2023-11-29T12:22:53+00:00
{}
2023-11-29T12:26:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
28606b504bb7b2c7e04f06a156740395e9c5586b
# Dataset Card for Wikiquotes German This dataset contains German quotes from [wikiquote](https://de.wikiquote.org/wiki/Hauptseite). It consists of two columns named 'author' and 'quote'. For regenerating the dataset, we provide the source code in this repo. You can use it as follows: ```sh pip install bs4 pandas python CrawlingQuotes.py ``` For usage in Python, just include ```python from datasets import load_dataset training_data = load_dataset("caretech-owl/wikiquote-de-quotes", split="train") ``` after installing 🤗 datasets (```pip install datasets```). * **Attribution**: Raw data was retrieved from https://dumps.wikimedia.org/dewikiquote/ on November 27th. * **License**: [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.en)
caretech-owl/wikiquote-de-quotes
[ "language:de", "license:cc-by-sa-4.0", "region:us" ]
2023-11-29T12:52:19+00:00
{"language": ["de"], "license": "cc-by-sa-4.0"}
2023-12-22T12:49:04+00:00
[]
[ "de" ]
TAGS #language-German #license-cc-by-sa-4.0 #region-us
# Dataset Card for Wikiquotes German This dataset contains German quotes from wikiquote. It consists of two columns named 'author' and 'quote'. For regenerating the dataset, we provide the source code in this repo. You can use it as follows: For usage in Python, just include after installing datasets (). * Attribution: Raw data was retrieved from URL on November 27th. * License: CC BY-SA 4.0
[ "# Dataset Card for Wikiquotes German\nThis dataset contains german quotes from wikiquote. It consists of two columns named 'author' and 'quote'.\nFor regenerating the dataset we provided the source code in this repo. You can use it as follows:\n\n\nFor usag in python just include \n\nafter installing datasets ().\n\n * Attribution: Raw data was retrieved from URL on November 27th.\n * License: CC BY-SA 4.0" ]
[ "TAGS\n#language-German #license-cc-by-sa-4.0 #region-us \n", "# Dataset Card for Wikiquotes German\nThis dataset contains german quotes from wikiquote. It consists of two columns named 'author' and 'quote'.\nFor regenerating the dataset we provided the source code in this repo. You can use it as follows:\n\n\nFor usag in python just include \n\nafter installing datasets ().\n\n * Attribution: Raw data was retrieved from URL on November 27th.\n * License: CC BY-SA 4.0" ]
[ 21, 104 ]
[ "passage: TAGS\n#language-German #license-cc-by-sa-4.0 #region-us \n# Dataset Card for Wikiquotes German\nThis dataset contains german quotes from wikiquote. It consists of two columns named 'author' and 'quote'.\nFor regenerating the dataset we provided the source code in this repo. You can use it as follows:\n\n\nFor usag in python just include \n\nafter installing datasets ().\n\n * Attribution: Raw data was retrieved from URL on November 27th.\n * License: CC BY-SA 4.0" ]
3b014f29111b8ab2e5fe630ffbaa7b536efaab81
# Dataset Card for "opinions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
raminass/opinions
[ "region:us" ]
2023-11-29T13:05:08+00:00
{"dataset_info": {"features": [{"name": "author_name", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "per_curiam", "dtype": "bool"}, {"name": "case_name", "dtype": "string"}, {"name": "date_filed", "dtype": "timestamp[ns]"}, {"name": "federal_cite_one", "dtype": "string"}, {"name": "absolute_url", "dtype": "string"}, {"name": "cluster", "dtype": "string"}, {"name": "year_filed", "dtype": "int64"}, {"name": "scdb_id", "dtype": "string"}, {"name": "scdb_decision_direction", "dtype": "float64"}, {"name": "scdb_votes_majority", "dtype": "float64"}, {"name": "scdb_votes_minority", "dtype": "float64"}, {"name": "text", "dtype": "string"}, {"name": "clean_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 325982706, "num_examples": 8261}, {"name": "test", "num_bytes": 79922598, "num_examples": 2066}], "download_size": 215652479, "dataset_size": 405905304}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-12-10T11:35:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "opinions" More Information needed
[ "# Dataset Card for \"opinions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"opinions\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"opinions\"\n\nMore Information needed" ]
4856805499162fdac11ba1d030def08c71b39f79
# Dataset Card for "300_curated_prompt_v0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/300_curated_prompt_v0
[ "region:us" ]
2023-11-29T13:13:02+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 432638, "num_examples": 300}], "download_size": 195038, "dataset_size": 432638}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T13:13:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "300_curated_prompt_v0" More Information needed
[ "# Dataset Card for \"300_curated_prompt_v0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"300_curated_prompt_v0\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"300_curated_prompt_v0\"\n\nMore Information needed" ]
a2339691eba6637bf1206d161d9e582d4b7c3f44
The ATIS (Airline Travel Information System) Dataset includes spoken queries (i.e., utterances) annotated for the task of slot filling in conversational systems. This dataset, ATISNER, includes airline spoken queries translated from English to Turkish, customized for Named Entity Recognition. Train and test splits include 4,978 and 890 sentences, respectively. Translations are provided by the following study. Şahinuç, F., Yücesoy, V., & Koç, A. (2020). Intent Classification and Slot Filling for Turkish Dialogue Systems. In 2020 28th signal processing and communications applications conference (pp. 1–4). Github Repo: https://github.com/avaapm/TurkishNamedEntityRecognition/ # If you would like to use any material in this repository, please cite the following paper: Oguzhan Ozcelik and Cagri Toraman. 2022. Named entity recognition in Turkish: A comparative study with detailed error analysis. Inf. Process. Manage. 59, 6 (Nov 2022). https://doi.org/10.1016/j.ipm.2022.103065
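A minimal loading sketch (not part of the original card) showing how the train and test splits described above could be pulled with 🤗 datasets; the split names and the shape of one record are assumptions, not confirmed by the repository.

```python
# Hedged sketch: load the Turkish ATIS NER data with Hugging Face datasets.
# Split names ("train"/"test") and record layout are assumptions.
from datasets import load_dataset

atis_ner = load_dataset("ctoraman/atis-ner-turkish")

print(atis_ner)              # expect ~4,978 train and ~890 test sentences
print(atis_ner["train"][0])  # inspect one annotated spoken query
```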
ctoraman/atis-ner-turkish
[ "task_categories:token-classification", "language:tr", "license:cc-by-nc-sa-4.0", "named entity recognition", "ner", "atis", "utterance", "spoken query", "region:us" ]
2023-11-29T13:33:11+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "task_categories": ["token-classification"], "tags": ["named entity recognition", "ner", "atis", "utterance", "spoken query"]}
2023-11-29T13:45:29+00:00
[]
[ "tr" ]
TAGS #task_categories-token-classification #language-Turkish #license-cc-by-nc-sa-4.0 #named entity recognition #ner #atis #utterance #spoken query #region-us
The ATIS (Airline Travel Information System) Dataset includes spoken queries (i.e., utterances) annotated for the task of slot filling in conversational systems. This dataset, ATISNER, includes airline spoken queries translated from English to Turkish, customized for Named Entity Recognition. Train and test splits include 4,978 and 890 sentences, respectively. Translations are provided by the following study. Şahinuç, F., Yücesoy, V., & Koç, A. (2020). Intent Classification and Slot Filling for Turkish Dialogue Systems. In 2020 28th signal processing and communications applications conference (pp. 1–4). Github Repo: URL # If you would like to use any material in this repository, please cite the following paper: Oguzhan Ozcelik and Cagri Toraman. 2022. Named entity recognition in Turkish: A comparative study with detailed error analysis. Inf. Process. Manage. 59, 6 (Nov 2022). URL
[ "# If you would like to use any material in this repository, please cite the following paper:\nOguzhan Ozcelik and Cagri Toraman. 2022. Named entity recognition in Turkish: A comparative study with detailed error analysis. Inf. Process. Manage. 59, 6 (Nov 2022). URL" ]
[ "TAGS\n#task_categories-token-classification #language-Turkish #license-cc-by-nc-sa-4.0 #named entity recognition #ner #atis #utterance #spoken query #region-us \n", "# If you would like to use any material in this repository, please cite the following paper:\nOguzhan Ozcelik and Cagri Toraman. 2022. Named entity recognition in Turkish: A comparative study with detailed error analysis. Inf. Process. Manage. 59, 6 (Nov 2022). URL" ]
[ 56, 68 ]
[ "passage: TAGS\n#task_categories-token-classification #language-Turkish #license-cc-by-nc-sa-4.0 #named entity recognition #ner #atis #utterance #spoken query #region-us \n# If you would like to use any material in this repository, please cite the following paper:\nOguzhan Ozcelik and Cagri Toraman. 2022. Named entity recognition in Turkish: A comparative study with detailed error analysis. Inf. Process. Manage. 59, 6 (Nov 2022). URL" ]
c2dca5db3c429cdbd7228b6d2276081a5f245c37
# Dataset Card for "cai-conversation-prod-h4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/cai-conversation-prod-h4
[ "region:us" ]
2023-11-29T13:50:22+00:00
{"dataset_info": {"features": [{"name": "index", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "init_prompt", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "init_response", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "critic_prompt", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "critic_response", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "revision_prompt", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "revision_response", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "chosen", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "rejected", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 608100382, "num_examples": 160800}, {"name": "test", "num_bytes": 32621318, "num_examples": 8552}], "download_size": 288349996, "dataset_size": 640721700}}
2023-11-29T13:50:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cai-conversation-prod-h4" More Information needed
[ "# Dataset Card for \"cai-conversation-prod-h4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cai-conversation-prod-h4\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cai-conversation-prod-h4\"\n\nMore Information needed" ]
8af1554d5f079aa16a50d4897018d49eb9359264
# Dataset Card for "thai-wiki-dataset-v3" This dataset collects all Thai Wikimedia project that cleaned all text for Thai language. Example: [Wikipedia](https://th.wikipedia.org/), [Wikiquote](https://th.wikiquote.org/), [Wikibooks](https://th.wikibooks.org/), [Wikisource](https://th.wikisource.org), and [Wiktionary](https://th.wiktionary.org/). **Use cause**: RAG, and pretraining model. License: cc-by-sa-3.0
pythainlp/thai-wiki-dataset-v3
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:th", "license:cc-by-sa-3.0", "region:us" ]
2023-11-29T13:53:19+00:00
{"language": ["th"], "license": "cc-by-sa-3.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1255848857, "num_examples": 196533}], "download_size": 429985288, "dataset_size": 1255848857}}
2024-01-20T07:16:36+00:00
[]
[ "th" ]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #language-Thai #license-cc-by-sa-3.0 #region-us
# Dataset Card for "thai-wiki-dataset-v3" This dataset collects all Thai Wikimedia project that cleaned all text for Thai language. Example: Wikipedia, Wikiquote, Wikibooks, Wikisource, and Wiktionary. Use cause: RAG, and pretraining model. License: cc-by-sa-3.0
[ "# Dataset Card for \"thai-wiki-dataset-v3\"\n\nThis dataset collects all Thai Wikimedia project that cleaned all text for Thai language. Example: Wikipedia, Wikiquote, Wikibooks, Wikisource, and Wiktionary.\n\nUse cause: RAG, and pretraining model.\n\nLicense: cc-by-sa-3.0" ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-Thai #license-cc-by-sa-3.0 #region-us \n", "# Dataset Card for \"thai-wiki-dataset-v3\"\n\nThis dataset collects all Thai Wikimedia project that cleaned all text for Thai language. Example: Wikipedia, Wikiquote, Wikibooks, Wikisource, and Wiktionary.\n\nUse cause: RAG, and pretraining model.\n\nLicense: cc-by-sa-3.0" ]
[ 45, 74 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-Thai #license-cc-by-sa-3.0 #region-us \n# Dataset Card for \"thai-wiki-dataset-v3\"\n\nThis dataset collects all Thai Wikimedia project that cleaned all text for Thai language. Example: Wikipedia, Wikiquote, Wikibooks, Wikisource, and Wiktionary.\n\nUse cause: RAG, and pretraining model.\n\nLicense: cc-by-sa-3.0" ]
a105c73e17805804bdc26b49f70166848fcee7f2
# Dataset Card for "vsums_test_phillip_jones_full" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Xapien/vsums_test_phillip_jones_full
[ "region:us" ]
2023-11-29T14:00:07+00:00
{"dataset_info": {"features": [{"name": "subject_A", "dtype": "string"}, {"name": "entity_sourcetext_A", "dtype": "string"}, {"name": "entity_fingerprint_A", "dtype": "string"}, {"name": "DRE_A", "dtype": "string"}, {"name": "new_entity_description_A", "dtype": "string"}, {"name": "subject_B", "dtype": "string"}, {"name": "entity_sourcetext_B", "dtype": "string"}, {"name": "entity_fingerprint_B", "dtype": "string"}, {"name": "DRE_B", "dtype": "string"}, {"name": "new_entity_description_B", "dtype": "string"}, {"name": "new_similarity", "dtype": "float64"}, {"name": "old_similarity", "dtype": "float64"}, {"name": "same_persona", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 19068422, "num_examples": 3655}], "download_size": 710820, "dataset_size": 19068422}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T14:00:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "vsums_test_phillip_jones_full" More Information needed
[ "# Dataset Card for \"vsums_test_phillip_jones_full\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"vsums_test_phillip_jones_full\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"vsums_test_phillip_jones_full\"\n\nMore Information needed" ]
f3191b391b3963e1947c6a70ae5d97b50fe9d648
The "gender identity" subset of the large-scale dataset published in the LREC 2022 paper "Large-Scale Hate Speech Detection with Cross-Domain Transfer". This subset is used in the experiments of "Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." The "gender identity" subset includes 20,000 tweets in English. The published data split is the first fold of 10-fold cross-validation, used in the experiments mentioned above. Train split has 18,000 tweets. Test split has 2,000 tweets. HateLabel: - 0 Normal - 1 Offensive - 2 Hate # GitHub Repo: https://github.com/avaapm/hatespeech # If you use this dataset, please cite the following papers: - Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225). - Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597.
ctoraman/gender-hate-speech
[ "task_categories:text-classification", "language:en", "license:cc-by-nc-sa-4.0", "hate speech", "hate speech detection", "hate-speech", "tweets", "social media", "hate-speech-detection", "gender identity", "gender", "region:us" ]
2023-11-29T14:10:28+00:00
{"language": ["en"], "license": "cc-by-nc-sa-4.0", "task_categories": ["text-classification"], "tags": ["hate speech", "hate speech detection", "hate-speech", "tweets", "social media", "hate-speech-detection", "gender identity", "gender"]}
2023-11-30T12:02:34+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #language-English #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #hate-speech-detection #gender identity #gender #region-us
The "gender identity" subset of the large-scale dataset published in the LREC 2022 paper "Large-Scale Hate Speech Detection with Cross-Domain Transfer". This subset is used in the experiments of "Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." The "gender identity" subset includes 20,000 tweets in English. The published data split is the first fold of 10-fold cross-validation, used in the experiments mentioned above. Train split has 18,000 tweets. Test split has 2,000 tweets. HateLabel: - 0 Normal - 1 Offensive - 2 Hate # GitHub Repo: URL # If you use this dataset, please cite the following papers: - Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225). - Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597.
[ "# GitHub Repo: \nURL", "# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
[ "TAGS\n#task_categories-text-classification #language-English #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #hate-speech-detection #gender identity #gender #region-us \n", "# GitHub Repo: \nURL", "# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
[ 72, 8, 138 ]
[ "passage: TAGS\n#task_categories-text-classification #language-English #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #hate-speech-detection #gender identity #gender #region-us \n# GitHub Repo: \nURL# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
ec96b0a45588b8ec376d7d471c21830edcb16cbe
The "gender identity" subset of the large-scale dataset published in the LREC 2022 paper "Large-Scale Hate Speech Detection with Cross-Domain Transfer". This subset is also used in the experiments of "Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." The "gender identity" subset includes 20,000 tweets in Turkish. The published data split is the first fold of 10-fold cross-validation, used in the experiments mentioned above. Train split has 18,000 tweets. Test split has 2,000 tweets. HateLabel: - 0 Normal - 1 Offensive - 2 Hate # GitHub Repo: https://github.com/avaapm/hatespeech # If you use this dataset, please cite the following papers: - Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225). - Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597.
ctoraman/gender-hate-speech-turkish
[ "task_categories:text-classification", "language:tr", "license:cc-by-nc-sa-4.0", "hate speech", "hate speech detection", "hate-speech", "tweets", "social media", "topic", "hate-speech-detection", "gender identity", "gender", "region:us" ]
2023-11-29T14:13:15+00:00
{"language": ["tr"], "license": "cc-by-nc-sa-4.0", "task_categories": ["text-classification"], "tags": ["hate speech", "hate speech detection", "hate-speech", "tweets", "social media", "topic", "hate-speech-detection", "gender identity", "gender"]}
2023-11-30T12:03:50+00:00
[]
[ "tr" ]
TAGS #task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #topic #hate-speech-detection #gender identity #gender #region-us
The "gender identity" subset of the large-scale dataset published in the LREC 2022 paper "Large-Scale Hate Speech Detection with Cross-Domain Transfer". This subset is also used in the experiments of "Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." The "gender identity" subset includes 20,000 tweets in Turkish. The published data split is the first fold of 10-fold cross-validation, used in the experiments mentioned above. Train split has 18,000 tweets. Test split has 2,000 tweets. HateLabel: - 0 Normal - 1 Offensive - 2 Hate # GitHub Repo: URL # If you use this dataset, please cite the following papers: - Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225). - Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597.
[ "# GitHub Repo: \nURL", "# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
[ "TAGS\n#task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #topic #hate-speech-detection #gender identity #gender #region-us \n", "# GitHub Repo: \nURL", "# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
[ 76, 8, 138 ]
[ "passage: TAGS\n#task_categories-text-classification #language-Turkish #license-cc-by-nc-sa-4.0 #hate speech #hate speech detection #hate-speech #tweets #social media #topic #hate-speech-detection #gender identity #gender #region-us \n# GitHub Repo: \nURL# If you use this dataset, please cite the following papers:\n- Toraman, C., Şahinuç, F., & Yilmaz, E. (2022, June). Large-Scale Hate Speech Detection with Cross-Domain Transfer. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2215-2225).\n- Şahinuç, F., Yilmaz, E. H., Toraman, C., & Koç, A. (2023). The effect of gender bias on hate speech detection. Signal, Image and Video Processing, 17(4), 1591-1597." ]
7c5ba140d8546753f4f8d4301c176ecda7e35450
# Formatted MMLU dataset MMLU Dataset preprocessed and split by [qLoRA](https://github.com/artidoro/qlora/tree/main) # References ``` @article{hendryckstest2021, title={Measuring Massive Multitask Language Understanding}, author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt}, journal={Proceedings of the International Conference on Learning Representations (ICLR)}, year={2021} } @article{hendrycks2021ethics, title={Aligning AI With Shared Human Values}, author={Dan Hendrycks and Collin Burns and Steven Basart and Andrew Critch and Jerry Li and Dawn Song and Jacob Steinhardt}, journal={Proceedings of the International Conference on Learning Representations (ICLR)}, year={2021} } ```
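A loading sketch for the two configurations; the config names (`zero-shot`, `five-shot`) and the `validation`/`test` splits follow the configs declared in this card's metadata.

```python
# Sketch: load the five-shot configuration of the formatted MMLU dataset.
# Config and split names follow this card's metadata.
from datasets import load_dataset

mmlu = load_dataset("Cheng98/mmlu", "five-shot")

print(mmlu["validation"][0])   # one formatted evaluation example
print(mmlu["test"].num_rows)   # size of the test split
```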
Cheng98/mmlu
[ "task_categories:text-classification", "language:en", "region:us" ]
2023-11-29T14:19:34+00:00
{"language": ["en"], "task_categories": ["text-classification"], "configs": [{"config_name": "zero-shot", "data_files": [{"split": "validation", "path": "data/five-shot/val.json"}, {"split": "test", "path": "data/five-shot/test.json"}]}, {"config_name": "five-shot", "data_files": [{"split": "validation", "path": "data/five-shot/val.json"}, {"split": "test", "path": "data/five-shot/test.json"}]}]}
2023-11-29T23:51:38+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #language-English #region-us
# Formatted MMLU dataset MMLU Dataset preprocessed and split by qLoRA # References
[ "# Formatted MMLU dataset\n\nMMLU Dataset preprocessed and split by qLoRA", "# References" ]
[ "TAGS\n#task_categories-text-classification #language-English #region-us \n", "# Formatted MMLU dataset\n\nMMLU Dataset preprocessed and split by qLoRA", "# References" ]
[ 21, 20, 3 ]
[ "passage: TAGS\n#task_categories-text-classification #language-English #region-us \n# Formatted MMLU dataset\n\nMMLU Dataset preprocessed and split by qLoRA# References" ]
98db8c3479a72076fa833886faf053fadcc954b7
# Dataset Card for "disaster-tweets-ru" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galthran/disaster-tweets-ru
[ "region:us" ]
2023-11-29T14:31:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "keyword", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "target", "dtype": "int64"}, {"name": "text_ru", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2017592, "num_examples": 6090}, {"name": "val", "num_bytes": 498856, "num_examples": 1523}, {"name": "test", "num_bytes": 1085675, "num_examples": 3263}], "download_size": 2007034, "dataset_size": 3602123}}
2023-11-30T08:29:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "disaster-tweets-ru" More Information needed
[ "# Dataset Card for \"disaster-tweets-ru\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"disaster-tweets-ru\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"disaster-tweets-ru\"\n\nMore Information needed" ]
7bd687b357f424f7f6bb41a6e50dd052194837fa
# Dataset Card for Dataset Name ## Dataset Details ### Dataset Description This extensive dataset, hosted on Huggingface, is a comprehensive resource for Optical Character Recognition (OCR) in the Telugu language, featuring an impressive array of 90+ configurations. Each configuration in this dataset corresponds to a unique font, meticulously curated by Dr. Rakesh Achanta and sourced from his GitHub repository (https://github.com/TeluguOCR/banti_telugu_ocr). The dataset is specifically designed to support and enhance the development of OCR models, ranging from simple Convolutional Recurrent Neural Network (CRNN) architectures to more advanced systems like trOCR. The versatility of this dataset lies in its large volume and diversity, making it an ideal choice for researchers and developers aiming to build robust OCR systems for the Telugu script. Key Features: - Font Diversity: Over 90 unique fonts, each forming a separate configuration, providing a rich variety in text styles and nuances. - Large Volume: Each configuration contains approximately 800,000 examples, summing up to a vast pool of data for comprehensive training and evaluation. - Data Split: The dataset is pre-split into training, validation, and test sets, following a 60/20/20 ratio, to facilitate efficient model training and benchmarking. - Use Cases: Ideal for developing a wide range of OCR models - from basic CRNNs to sophisticated models like trOCR. - Accessibility: Hosted on Huggingface, ensuring easy access and integration with various machine learning frameworks and tools. This dataset stands as a testament to Dr. Rakesh Achanta's dedication to enhancing Telugu language processing technologies. It is not just a tool for model development but also a gateway to preserving and digitizing the rich literary heritage of the Telugu language. Researchers and developers leveraging this dataset are encouraged to adhere to the ethical guidelines of AI research and development, ensuring that the applications developed are for the benefit of language preservation, accessibility, and technological advancement in a responsible manner. - **Fonts Curated by:** Dr. Rakesh Achanta - **Shared by:** Krishna Sriharsha Gundu - **Language(s) (NLP):** Telugu ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [Original Books Dataset](https://github.com/AnushaMotamarri/Telugu-Books-Dataset)
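To make the per-font configuration scheme concrete, here is a hedged loading sketch; the config name `Dhurjati` is taken from the `dataset_info` below, and the `image`/`text` column names follow the declared features.

```python
# Sketch: load one font configuration of the Telugu OCR corpus.
# "Dhurjati" is one of the 90+ font configs listed in the metadata below.
from datasets import load_dataset

ocr_font = load_dataset("gksriharsha/chitralekha", "Dhurjati")

sample = ocr_font["train"][0]
sample["image"].save("sample_line.png")  # rendered text-line image (PIL)
print(sample["text"])                    # ground-truth Telugu transcription
```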
gksriharsha/chitralekha
[ "task_categories:image-to-text", "size_categories:1M<n<10M", "language:te", "license:gpl-3.0", "region:us" ]
2023-11-29T14:31:24+00:00
{"language": ["te"], "license": "gpl-3.0", "size_categories": ["1M<n<10M"], "task_categories": ["image-to-text"], "dataset_info": [{"config_name": "Dhurjati", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1298445060.3780885, "num_examples": 475834}, {"name": "validation", "num_bytes": 432816839.3109558, "num_examples": 158612}, {"name": "test", "num_bytes": 432816839.3109558, "num_examples": 158612}], "download_size": 2214924048, "dataset_size": 2164078739}, {"config_name": "Gidugu", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1282865192.8855712, "num_examples": 476265}, {"name": "validation", "num_bytes": 427624424.55721444, "num_examples": 158756}, {"name": "test", "num_bytes": 427624424.55721444, "num_examples": 158756}], "download_size": 2189311335, "dataset_size": 2138114042.0000002}, {"config_name": "Gurajada", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1387146264.0840201, "num_examples": 474742}, {"name": "validation", "num_bytes": 462384035.9579899, "num_examples": 158248}, {"name": "test", "num_bytes": 462384035.9579899, "num_examples": 158248}], "download_size": 2343396240, "dataset_size": 2311914336}, {"config_name": "LakkiReddy", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1691465720.2043626, "num_examples": 477456}, {"name": "validation", "num_bytes": 563825449.3978187, "num_examples": 159153}, {"name": "test", "num_bytes": 563825449.3978187, "num_examples": 159153}], "download_size": 2803364713, "dataset_size": 2819116619}, {"config_name": "Mallanna", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1501113970.3809116, "num_examples": 476159}, {"name": "validation", "num_bytes": 500372374.30954427, "num_examples": 158720}, {"name": "test", "num_bytes": 500372374.30954427, "num_examples": 158720}], "download_size": 2502257967, "dataset_size": 2501858719}, {"config_name": "Mandali-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1473975690.6129284, "num_examples": 472433}, {"name": "validation", "num_bytes": 491326270.19353586, "num_examples": 157478}, {"name": "test", "num_bytes": 491326270.19353586, "num_examples": 157478}], "download_size": 2457756020, "dataset_size": 2456628231}, {"config_name": "NATS", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1356797141.105923, "num_examples": 473392}, {"name": "validation", "num_bytes": 452267624.4470385, "num_examples": 157798}, {"name": "test", "num_bytes": 452267624.4470385, "num_examples": 157798}], "download_size": 2303879039, "dataset_size": 2261332390}, {"config_name": "NTR", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1574367624.5834982, "num_examples": 473991}, {"name": "validation", "num_bytes": 524792529.7082509, "num_examples": 157998}, {"name": "test", "num_bytes": 524792529.7082509, "num_examples": 157998}], "download_size": 2615211115, "dataset_size": 2623952684}, {"config_name": "Nandini", "features": [{"name": "image", "dtype": "image"}, 
{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1726040301.9777348, "num_examples": 478621}, {"name": "validation", "num_bytes": 575349171.5111326, "num_examples": 159541}, {"name": "test", "num_bytes": 575349171.5111326, "num_examples": 159541}], "download_size": 2860476166, "dataset_size": 2876738645}, {"config_name": "Nandini3", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1578799869.8561406, "num_examples": 474888}, {"name": "validation", "num_bytes": 526266623.2853802, "num_examples": 158296}, {"name": "test", "num_bytes": 526269947.8584792, "num_examples": 158297}], "download_size": 5245174350, "dataset_size": 2631336441.0000005}, {"config_name": "NotoSansTelugu-Bold", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1752162695.265523, "num_examples": 476930}, {"name": "validation", "num_bytes": 584055456.3672385, "num_examples": 158977}, {"name": "test", "num_bytes": 584055456.3672385, "num_examples": 158977}], "download_size": 2904018741, "dataset_size": 2920273608}, {"config_name": "NotoSansTelugu-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1718034768.894641, "num_examples": 478227}, {"name": "validation", "num_bytes": 572678256.2982136, "num_examples": 159409}, {"name": "test", "num_bytes": 572681848.8071454, "num_examples": 159410}], "download_size": 2848500410, "dataset_size": 2863394874}, {"config_name": "NotoSansTeluguUI-Bold", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1750230388.4259622, "num_examples": 476148}, {"name": "validation", "num_bytes": 583413805.2870189, "num_examples": 158717}, {"name": "test", "num_bytes": 583413805.2870189, "num_examples": 158717}], "download_size": 2901117051, "dataset_size": 2917057999}, {"config_name": "NotoSansTeluguUI-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1723039562.5891204, "num_examples": 477735}, {"name": "validation", "num_bytes": 574346520.8630401, "num_examples": 159245}, {"name": "test", "num_bytes": 574350127.5478394, "num_examples": 159246}], "download_size": 2856472137, "dataset_size": 2871736211}, {"config_name": "NotoSerifTelugu-VariableFont_wght", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1615401522.415037, "num_examples": 475403}, {"name": "validation", "num_bytes": 538468306.7924815, "num_examples": 158468}, {"name": "test", "num_bytes": 538468306.7924815, "num_examples": 158468}], "download_size": 2684117723, "dataset_size": 2692338136}, {"config_name": "Peddana-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1349600562.5605006, "num_examples": 476033}, {"name": "validation", "num_bytes": 449867799.2197497, "num_examples": 158678}, {"name": "test", "num_bytes": 449867799.2197497, "num_examples": 158678}], "download_size": 2288692690, "dataset_size": 2249336161}, {"config_name": "Ponnala", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1564325775.2011507, "num_examples": 476711}, {"name": 
"validation", "num_bytes": 521443018.8994247, "num_examples": 158904}, {"name": "test", "num_bytes": 521443018.8994247, "num_examples": 158904}], "download_size": 2597151928, "dataset_size": 2607211813}, {"config_name": "Pothana2000", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1533893192.4, "num_examples": 474486}, {"name": "validation", "num_bytes": 511297730.8, "num_examples": 158162}, {"name": "test", "num_bytes": 511297730.8, "num_examples": 158162}], "download_size": 2546261970, "dataset_size": 2556488654}, {"config_name": "Pothana2001", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1292276278.9111156, "num_examples": 477344}, {"name": "validation", "num_bytes": 430759662.0444421, "num_examples": 159115}, {"name": "test", "num_bytes": 430759662.0444421, "num_examples": 159115}], "download_size": 2209119000, "dataset_size": 2153795603}, {"config_name": "Ramabhadra1", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1356669137.4, "num_examples": 477120}, {"name": "validation", "num_bytes": 452223045.8, "num_examples": 159040}, {"name": "test", "num_bytes": 452223045.8, "num_examples": 159040}], "download_size": 2293250323, "dataset_size": 2261115229}, {"config_name": "RamaneeyaWin", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1569779237.530234, "num_examples": 475390}, {"name": "validation", "num_bytes": 523261947.23488295, "num_examples": 158464}, {"name": "test", "num_bytes": 523261947.23488295, "num_examples": 158464}], "download_size": 2609295282, "dataset_size": 2616303132}, {"config_name": "Ramaraja-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1410891933.3096473, "num_examples": 472584}, {"name": "validation", "num_bytes": 470297311.1032158, "num_examples": 157528}, {"name": "test", "num_bytes": 470300296.5871368, "num_examples": 157529}], "download_size": 2371358480, "dataset_size": 2351489541}, {"config_name": "RaviPrakash", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1285990240.8963664, "num_examples": 473290}, {"name": "validation", "num_bytes": 428665225.05181676, "num_examples": 157764}, {"name": "test", "num_bytes": 428665225.05181676, "num_examples": 157764}], "download_size": 2191728340, "dataset_size": 2143320690.9999998}, {"config_name": "Sree Krushnadevaraya", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1449568569.1993153, "num_examples": 475658}, {"name": "validation", "num_bytes": 483190538.90034235, "num_examples": 158553}, {"name": "test", "num_bytes": 483190538.90034235, "num_examples": 158553}], "download_size": 2435442971, "dataset_size": 2415949647}, {"config_name": "Subhadra", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1695480570.569019, "num_examples": 473720}, {"name": "validation", "num_bytes": 565161383.2154905, "num_examples": 157907}, {"name": "test", "num_bytes": 565161383.2154905, "num_examples": 157907}], "download_size": 2809365039, "dataset_size": 2825803337}, 
{"config_name": "Suguna", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1446982722.6, "num_examples": 477066}, {"name": "validation", "num_bytes": 482327574.2, "num_examples": 159022}, {"name": "test", "num_bytes": 482327574.2, "num_examples": 159022}], "download_size": 2415257732, "dataset_size": 2411637871}, {"config_name": "Suranna", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1503599948.8440886, "num_examples": 474592}, {"name": "validation", "num_bytes": 501202095.07795566, "num_examples": 158198}, {"name": "test", "num_bytes": 501202095.07795566, "num_examples": 158198}], "download_size": 2506994404, "dataset_size": 2506004139}, {"config_name": "Suravara_Samhita", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1558595237.4, "num_examples": 474537}, {"name": "validation", "num_bytes": 519531745.8, "num_examples": 158179}, {"name": "test", "num_bytes": 519531745.8, "num_examples": 158179}], "download_size": 2585415226, "dataset_size": 2597658729}, {"config_name": "Suravara_Swarna", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1486359795.6, "num_examples": 475680}, {"name": "validation", "num_bytes": 495453265.2, "num_examples": 158560}, {"name": "test", "num_bytes": 495453265.2, "num_examples": 158560}], "download_size": 2475591226, "dataset_size": 2477266326}, {"config_name": "Suravara_Swarna_bold", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1720811516.4, "num_examples": 478134}, {"name": "validation", "num_bytes": 573603838.8, "num_examples": 159378}, {"name": "test", "num_bytes": 573603838.8, "num_examples": 159378}], "download_size": 2850593671, "dataset_size": 2868019194}, {"config_name": "Suravara_Swarna_italic", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1447766013.2634926, "num_examples": 479031}, {"name": "validation", "num_bytes": 482591693.36825377, "num_examples": 159678}, {"name": "test", "num_bytes": 482591693.36825377, "num_examples": 159678}], "download_size": 2422412589, "dataset_size": 2412949400}, {"config_name": "Suravaram", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1429147481.2187955, "num_examples": 477026}, {"name": "validation", "num_bytes": 476383492.3906023, "num_examples": 159009}, {"name": "test", "num_bytes": 476383492.3906023, "num_examples": 159009}], "download_size": 4809669330, "dataset_size": 2381914466}, {"config_name": "TLOTAmmaBI_ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2460661581.730414, "num_examples": 475658}, {"name": "validation", "num_bytes": 820222251.6347929, "num_examples": 158553}, {"name": "test", "num_bytes": 820222251.6347929, "num_examples": 158553}], "download_size": 4096792615, "dataset_size": 4101106084.9999995}, {"config_name": "TLOTAmmaB_ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2416168779.915695, "num_examples": 477459}, {"name": "validation", 
"num_bytes": 805389593.3052317, "num_examples": 159153}, {"name": "test", "num_bytes": 805394653.7790732, "num_examples": 159154}], "download_size": 4021858976, "dataset_size": 4026953027}, {"config_name": "TLOTAmmaI_ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2477661003.4358616, "num_examples": 472795}, {"name": "validation", "num_bytes": 825890494.7820691, "num_examples": 157599}, {"name": "test", "num_bytes": 825890494.7820691, "num_examples": 157599}], "download_size": 4125584249, "dataset_size": 4129441993}, {"config_name": "TLOTAmmaN_ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2433593183.980863, "num_examples": 476750}, {"name": "validation", "num_bytes": 811199429.5095686, "num_examples": 158917}, {"name": "test", "num_bytes": 811199429.5095686, "num_examples": 158917}], "download_size": 4050885257, "dataset_size": 4055992043.0000005}, {"config_name": "TLOTAmrutaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2653406725.2, "num_examples": 475320}, {"name": "validation", "num_bytes": 884468908.4, "num_examples": 158440}, {"name": "test", "num_bytes": 884468908.4, "num_examples": 158440}], "download_size": 4422612970, "dataset_size": 4422344542}, {"config_name": "TLOTAmrutaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2636543466.6297607, "num_examples": 474288}, {"name": "validation", "num_bytes": 878847822.2099203, "num_examples": 158096}, {"name": "test", "num_bytes": 878853381.1603189, "num_examples": 158097}], "download_size": 4393963744, "dataset_size": 4394244670}, {"config_name": "TLOTAtreyaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1920072146.440807, "num_examples": 476571}, {"name": "validation", "num_bytes": 640024048.8136024, "num_examples": 158857}, {"name": "test", "num_bytes": 640028077.7455903, "num_examples": 158858}], "download_size": 3187176178, "dataset_size": 3200124273}, {"config_name": "TLOTAtreyaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1468763709.6, "num_examples": 477087}, {"name": "validation", "num_bytes": 489587903.2, "num_examples": 159029}, {"name": "test", "num_bytes": 489587903.2, "num_examples": 159029}], "download_size": 2463733719, "dataset_size": 2447939516}, {"config_name": "TLOTAtreyaI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2031521130, "num_examples": 478089}, {"name": "validation", "num_bytes": 677173710, "num_examples": 159363}, {"name": "test", "num_bytes": 677173710, "num_examples": 159363}], "download_size": 3373208127, "dataset_size": 3385868550}, {"config_name": "TLOTAtreyaN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1499893860.1101012, "num_examples": 475416}, {"name": "validation", "num_bytes": 499967774.9449494, "num_examples": 158473}, {"name": "test", "num_bytes": 499967774.9449494, "num_examples": 158473}], "download_size": 2503688455, "dataset_size": 2499829410}, {"config_name": 
"TLOTChandanaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2570736110.0222764, "num_examples": 477280}, {"name": "validation", "num_bytes": 856915627.4888619, "num_examples": 159094}, {"name": "test", "num_bytes": 856915627.4888619, "num_examples": 159094}], "download_size": 8582881701, "dataset_size": 4284567365.000001}, {"config_name": "TLOTChandanaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2573995646.187106, "num_examples": 477970}, {"name": "validation", "num_bytes": 858002138.906447, "num_examples": 159324}, {"name": "test", "num_bytes": 858002138.906447, "num_examples": 159324}], "download_size": 4287747645, "dataset_size": 4289999924}, {"config_name": "TLOTDevaI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2480881369.494744, "num_examples": 474412}, {"name": "validation", "num_bytes": 826963942.7526281, "num_examples": 158138}, {"name": "test", "num_bytes": 826963942.7526281, "num_examples": 158138}], "download_size": 4131458823, "dataset_size": 4134809255}, {"config_name": "TLOTDevaN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2500855833.517619, "num_examples": 477159}, {"name": "validation", "num_bytes": 833618611.1725397, "num_examples": 159053}, {"name": "test", "num_bytes": 833623852.309841, "num_examples": 159054}], "download_size": 4164760790, "dataset_size": 4168098297}, {"config_name": "TLOTDraupadiBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2323911850.2, "num_examples": 476610}, {"name": "validation", "num_bytes": 774637283.4, "num_examples": 158870}, {"name": "test", "num_bytes": 774637283.4, "num_examples": 158870}], "download_size": 3866617083, "dataset_size": 3873186417}, {"config_name": "TLOTDraupadiB_ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2307940549.6171513, "num_examples": 479856}, {"name": "validation", "num_bytes": 769318326.1914245, "num_examples": 159953}, {"name": "test", "num_bytes": 769318326.1914245, "num_examples": 159953}], "download_size": 3839262612, "dataset_size": 3846577202}, {"config_name": "TLOTDraupadiI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2544743977.8577175, "num_examples": 476149}, {"name": "validation", "num_bytes": 848251555.5711412, "num_examples": 158717}, {"name": "test", "num_bytes": 848251555.5711412, "num_examples": 158717}], "download_size": 4239804725, "dataset_size": 4241247089}, {"config_name": "TLOTDraupadiN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2541474368.49558, "num_examples": 475408}, {"name": "validation", "num_bytes": 847161686.7522099, "num_examples": 158470}, {"name": "test", "num_bytes": 847161686.7522099, "num_examples": 158470}], "download_size": 4234310229, "dataset_size": 4235797742}, {"config_name": "TLOTGolkondaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2389702278.805238, 
"num_examples": 474540}, {"name": "validation", "num_bytes": 796572462.0973812, "num_examples": 158181}, {"name": "test", "num_bytes": 796572462.0973812, "num_examples": 158181}], "download_size": 3977928852, "dataset_size": 3982847203}, {"config_name": "TLOTGolkondaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2389122371.711336, "num_examples": 475805}, {"name": "validation", "num_bytes": 796375797.6443319, "num_examples": 158602}, {"name": "test", "num_bytes": 796375797.6443319, "num_examples": 158602}], "download_size": 3977251991, "dataset_size": 3981873967}, {"config_name": "TLOTKrishnaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2432774526.539302, "num_examples": 476300}, {"name": "validation", "num_bytes": 810926544.7303492, "num_examples": 158767}, {"name": "test", "num_bytes": 810926544.7303492, "num_examples": 158767}], "download_size": 4050283714, "dataset_size": 4054627616}, {"config_name": "TLOTKrishnaI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2480494107.7215586, "num_examples": 476670}, {"name": "validation", "num_bytes": 826831369.2405195, "num_examples": 158890}, {"name": "test", "num_bytes": 826836573.0379218, "num_examples": 158891}], "download_size": 4130987632, "dataset_size": 4134162050}, {"config_name": "TLOTKrishnaN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2476823323.4861865, "num_examples": 474258}, {"name": "validation", "num_bytes": 825607774.4953955, "num_examples": 158086}, {"name": "test", "num_bytes": 825612997.0184178, "num_examples": 158087}], "download_size": 8245933584, "dataset_size": 4128044095}, {"config_name": "TLOTManuBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2416789011.099815, "num_examples": 479831}, {"name": "validation", "num_bytes": 805598015.9500924, "num_examples": 159944}, {"name": "test", "num_bytes": 805598015.9500924, "num_examples": 159944}], "download_size": 8022091215, "dataset_size": 4027985042.9999995}, {"config_name": "TLOTManuB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2401248706.737913, "num_examples": 476523}, {"name": "validation", "num_bytes": 800416235.5793043, "num_examples": 158841}, {"name": "test", "num_bytes": 800421274.6827825, "num_examples": 158842}], "download_size": 3996692334, "dataset_size": 4002086217}, {"config_name": "TLOTManuI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2172777272.108018, "num_examples": 474666}, {"name": "validation", "num_bytes": 724259090.7026726, "num_examples": 158222}, {"name": "test", "num_bytes": 724263668.1893097, "num_examples": 158223}], "download_size": 3613125844, "dataset_size": 3621300031}, {"config_name": "TLOTManuN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2157988564.914396, "num_examples": 473253}, {"name": "validation", "num_bytes": 719334081.5428022, "num_examples": 157752}, {"name": "test", "num_bytes": 719334081.5428022, 
"num_examples": 157752}], "download_size": 3588254209, "dataset_size": 3596656728.0000005}, {"config_name": "TLOTMenakaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2288615615.2453403, "num_examples": 476286}, {"name": "validation", "num_bytes": 762876676.87733, "num_examples": 158763}, {"name": "test", "num_bytes": 762876676.87733, "num_examples": 158763}], "download_size": 3808214919, "dataset_size": 3814368969}, {"config_name": "TLOTMenakaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2265423732.440631, "num_examples": 476485}, {"name": "validation", "num_bytes": 755144413.7796845, "num_examples": 158829}, {"name": "test", "num_bytes": 755144413.7796845, "num_examples": 158829}], "download_size": 7528268200, "dataset_size": 3775712560.0000005}, {"config_name": "TLOTMenakaI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2248679654.497752, "num_examples": 476680}, {"name": "validation", "num_bytes": 749563029.751124, "num_examples": 158894}, {"name": "test", "num_bytes": 749563029.751124, "num_examples": 158894}], "download_size": 3740363965, "dataset_size": 3747805714}, {"config_name": "TLOTMenakaN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2212555573.744489, "num_examples": 476734}, {"name": "validation", "num_bytes": 737521618.6277553, "num_examples": 158912}, {"name": "test", "num_bytes": 737521618.6277553, "num_examples": 158912}], "download_size": 3679785782, "dataset_size": 3687598810.9999995}, {"config_name": "TLOTPavaniBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2581188469.774467, "num_examples": 476364}, {"name": "validation", "num_bytes": 860401575.1127664, "num_examples": 158789}, {"name": "test", "num_bytes": 860401575.1127664, "num_examples": 158789}], "download_size": 4301716239, "dataset_size": 4301991620}, {"config_name": "TLOTPavaniB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2536569022.9252853, "num_examples": 476365}, {"name": "validation", "num_bytes": 845526557.5373572, "num_examples": 158789}, {"name": "test", "num_bytes": 845526557.5373572, "num_examples": 158789}], "download_size": 4225675923, "dataset_size": 4227622138}, {"config_name": "TLOTPriyaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3213044951.3006263, "num_examples": 472119}, {"name": "validation", "num_bytes": 1071021789.3496867, "num_examples": 157374}, {"name": "test", "num_bytes": 1071021789.3496867, "num_examples": 157374}], "download_size": 5367118968, "dataset_size": 5355088530}, {"config_name": "TLOTPriyaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3230362124.4, "num_examples": 475308}, {"name": "validation", "num_bytes": 1076787374.8, "num_examples": 158436}, {"name": "test", "num_bytes": 1076787374.8, "num_examples": 158436}], "download_size": 5395993279, "dataset_size": 5383936874}, {"config_name": "TLOTRajanBI_Ship", "features": [{"name": "image", "dtype": 
"image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3353184954.5082364, "num_examples": 474312}, {"name": "validation", "num_bytes": 1117735387.7458818, "num_examples": 158105}, {"name": "test", "num_bytes": 1117735387.7458818, "num_examples": 158105}], "download_size": 5601810958, "dataset_size": 5588655730}, {"config_name": "TLOTRajanB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3333244214.4, "num_examples": 473649}, {"name": "validation", "num_bytes": 1111081404.8, "num_examples": 157883}, {"name": "test", "num_bytes": 1111081404.8, "num_examples": 157883}], "download_size": 11147115559, "dataset_size": 5555407024.000001}, {"config_name": "TLOTRajaniBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2052738894.6, "num_examples": 475389}, {"name": "validation", "num_bytes": 684246298.2, "num_examples": 158463}, {"name": "test", "num_bytes": 684246298.2, "num_examples": 158463}], "download_size": 3411081728, "dataset_size": 3421231491}, {"config_name": "TLOTRajaniB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2037547632.604111, "num_examples": 475785}, {"name": "validation", "num_bytes": 679186826.6979445, "num_examples": 158596}, {"name": "test", "num_bytes": 679186826.6979445, "num_examples": 158596}], "download_size": 3385018225, "dataset_size": 3395921286}, {"config_name": "TLOTSanjanaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2209718743.6491027, "num_examples": 475899}, {"name": "validation", "num_bytes": 736572914.5497009, "num_examples": 158633}, {"name": "test", "num_bytes": 736577557.8011967, "num_examples": 158634}], "download_size": 3674404765, "dataset_size": 3682869216}, {"config_name": "TLOTSanjanaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2217936060.895656, "num_examples": 476629}, {"name": "validation", "num_bytes": 739315122.552172, "num_examples": 158877}, {"name": "test", "num_bytes": 739315122.552172, "num_examples": 158877}], "download_size": 3687984178, "dataset_size": 3696566306}, {"config_name": "TLOTSitaraBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2519685455.5459795, "num_examples": 476097}, {"name": "validation", "num_bytes": 839900444.2270104, "num_examples": 158700}, {"name": "test", "num_bytes": 839900444.2270104, "num_examples": 158700}], "download_size": 4197747699, "dataset_size": 4199486344}, {"config_name": "TLOTSitaraB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2503669021.2, "num_examples": 476304}, {"name": "validation", "num_bytes": 834556340.4, "num_examples": 158768}, {"name": "test", "num_bytes": 834556340.4, "num_examples": 158768}], "download_size": 4170641698, "dataset_size": 4172781702}, {"config_name": "TLOTSwamiB", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2425012348.9576674, "num_examples": 477330}, {"name": "validation", "num_bytes": 808342530.0211664, "num_examples": 
159111}, {"name": "test", "num_bytes": 808342530.0211664, "num_examples": 159111}], "download_size": 4038041582, "dataset_size": 4041697409}, {"config_name": "TLOTSwamiBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2850358898.466789, "num_examples": 478777}, {"name": "validation", "num_bytes": 950123601.7666057, "num_examples": 159593}, {"name": "test", "num_bytes": 950123601.7666057, "num_examples": 159593}], "download_size": 4756940495, "dataset_size": 4750606102}, {"config_name": "TLOTSwamiB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2597770710.722685, "num_examples": 475800}, {"name": "validation", "num_bytes": 865923570.240895, "num_examples": 158600}, {"name": "test", "num_bytes": 865929030.0364199, "num_examples": 158601}], "download_size": 4330358867, "dataset_size": 4329623311}, {"config_name": "TLOTVennela1B_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1858266228.4038165, "num_examples": 476703}, {"name": "validation", "num_bytes": 619425974.2980918, "num_examples": 158902}, {"name": "test", "num_bytes": 619425974.2980918, "num_examples": 158902}], "download_size": 9264631387, "dataset_size": 3097118177}, {"config_name": "TLOTVennelaBI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2075214563.274462, "num_examples": 475737}, {"name": "validation", "num_bytes": 691742549.862769, "num_examples": 158580}, {"name": "test", "num_bytes": 691742549.862769, "num_examples": 158580}], "download_size": 3449852145, "dataset_size": 3458699663}, {"config_name": "TLOTVennelaB_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1853628708.5342138, "num_examples": 475764}, {"name": "validation", "num_bytes": 617876236.1780713, "num_examples": 158588}, {"name": "test", "num_bytes": 617880132.287715, "num_examples": 158589}], "download_size": 3076196686, "dataset_size": 3089385077}, {"config_name": "TLOTVennelaI_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2220159958.2, "num_examples": 477489}, {"name": "validation", "num_bytes": 740053319.4, "num_examples": 159163}, {"name": "test", "num_bytes": 740053319.4, "num_examples": 159163}], "download_size": 3692812769, "dataset_size": 3700266597}, {"config_name": "TLOTVennelaN_Ship", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1969766905.1881945, "num_examples": 476133}, {"name": "validation", "num_bytes": 656593105.4059027, "num_examples": 158712}, {"name": "test", "num_bytes": 656593105.4059027, "num_examples": 158712}], "download_size": 3270034078, "dataset_size": 3282953116}, {"config_name": "TenaliRamakrishna-Regular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1412098107.6, "num_examples": 479922}, {"name": "validation", "num_bytes": 470699369.2, "num_examples": 159974}, {"name": "test", "num_bytes": 470699369.2, "num_examples": 159974}], "download_size": 2373061510, "dataset_size": 2353496846}, {"config_name": "Tikkana", "features": [{"name": 
"image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 237760800.6, "num_examples": 476520}, {"name": "validation", "num_bytes": 79253600.2, "num_examples": 158840}, {"name": "test", "num_bytes": 79253600.2, "num_examples": 158840}], "download_size": 266272383, "dataset_size": 396268001}, {"config_name": "TimmanaRegular", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1476790008.6, "num_examples": 478059}, {"name": "validation", "num_bytes": 492263336.2, "num_examples": 159353}, {"name": "test", "num_bytes": 492263336.2, "num_examples": 159353}], "download_size": 2461309068, "dataset_size": 2461316681}, {"config_name": "Vajram", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1522698226.9404452, "num_examples": 480837}, {"name": "validation", "num_bytes": 507566075.64681506, "num_examples": 160279}, {"name": "test", "num_bytes": 507569242.41273975, "num_examples": 160280}], "download_size": 2548130724, "dataset_size": 2537833545}, {"config_name": "Vani", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1457020940.7032518, "num_examples": 476385}, {"name": "validation", "num_bytes": 485673646.9010839, "num_examples": 158795}, {"name": "test", "num_bytes": 485676705.39566433, "num_examples": 158796}], "download_size": 2434817917, "dataset_size": 2428371293}, {"config_name": "Vanib", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1522290417.6, "num_examples": 474951}, {"name": "validation", "num_bytes": 507430139.2, "num_examples": 158317}, {"name": "test", "num_bytes": 507430139.2, "num_examples": 158317}], "download_size": 2529233521, "dataset_size": 2537150696}, {"config_name": "Vemana", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1699154826.4604304, "num_examples": 476205}, {"name": "validation", "num_bytes": 566388510.2697848, "num_examples": 158736}, {"name": "test", "num_bytes": 566388510.2697848, "num_examples": 158736}], "download_size": 2814457802, "dataset_size": 2831931847}, {"config_name": "akshar", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1339177104.1214905, "num_examples": 476169}, {"name": "validation", "num_bytes": 446395180.4392547, "num_examples": 158724}, {"name": "test", "num_bytes": 446395180.4392547, "num_examples": 158724}], "download_size": 2284376294, "dataset_size": 2231967465}, {"config_name": "gautami", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1459193859.1610594, "num_examples": 476425}, {"name": "validation", "num_bytes": 486399994.91947037, "num_examples": 158809}, {"name": "test", "num_bytes": 486399994.91947037, "num_examples": 158809}], "download_size": 2447315957, "dataset_size": 2431993849}, {"config_name": "gautamib", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1464740409.2608879, "num_examples": 477459}, {"name": "validation", "num_bytes": 488249870.869556, "num_examples": 159154}, {"name": "test", "num_bytes": 488249870.869556, 
"num_examples": 159154}], "download_size": 2454242590, "dataset_size": 2441240151}, {"config_name": "lohit_te", "features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1566900366.462158, "num_examples": 477809}, {"name": "validation", "num_bytes": 522301215.268921, "num_examples": 159270}, {"name": "test", "num_bytes": 522301215.268921, "num_examples": 159270}], "download_size": 2611413315, "dataset_size": 2611502797}], "configs": [{"config_name": "Dhurjati", "data_files": [{"split": "train", "path": "Dhurjati/train-*"}, {"split": "validation", "path": "Dhurjati/validation-*"}, {"split": "test", "path": "Dhurjati/test-*"}]}, {"config_name": "Gidugu", "data_files": [{"split": "train", "path": "Gidugu/train-*"}, {"split": "validation", "path": "Gidugu/validation-*"}, {"split": "test", "path": "Gidugu/test-*"}]}, {"config_name": "Gurajada", "data_files": [{"split": "train", "path": "Gurajada/train-*"}, {"split": "validation", "path": "Gurajada/validation-*"}, {"split": "test", "path": "Gurajada/test-*"}]}, {"config_name": "LakkiReddy", "data_files": [{"split": "train", "path": "LakkiReddy/train-*"}, {"split": "validation", "path": "LakkiReddy/validation-*"}, {"split": "test", "path": "LakkiReddy/test-*"}]}, {"config_name": "Mallanna", "data_files": [{"split": "train", "path": "Mallanna/train-*"}, {"split": "validation", "path": "Mallanna/validation-*"}, {"split": "test", "path": "Mallanna/test-*"}]}, {"config_name": "Mandali-Regular", "data_files": [{"split": "train", "path": "Mandali-Regular/train-*"}, {"split": "validation", "path": "Mandali-Regular/validation-*"}, {"split": "test", "path": "Mandali-Regular/test-*"}]}, {"config_name": "NATS", "data_files": [{"split": "train", "path": "NATS/train-*"}, {"split": "validation", "path": "NATS/validation-*"}, {"split": "test", "path": "NATS/test-*"}]}, {"config_name": "NTR", "data_files": [{"split": "train", "path": "NTR/train-*"}, {"split": "validation", "path": "NTR/validation-*"}, {"split": "test", "path": "NTR/test-*"}]}, {"config_name": "Nandini", "data_files": [{"split": "train", "path": "Nandini/train-*"}, {"split": "validation", "path": "Nandini/validation-*"}, {"split": "test", "path": "Nandini/test-*"}]}, {"config_name": "Nandini3", "data_files": [{"split": "train", "path": "Nandini3/train-*"}, {"split": "validation", "path": "Nandini3/validation-*"}, {"split": "test", "path": "Nandini3/test-*"}]}, {"config_name": "NotoSansTelugu-Bold", "data_files": [{"split": "train", "path": "NotoSansTelugu-Bold/train-*"}, {"split": "validation", "path": "NotoSansTelugu-Bold/validation-*"}, {"split": "test", "path": "NotoSansTelugu-Bold/test-*"}]}, {"config_name": "NotoSansTelugu-Regular", "data_files": [{"split": "train", "path": "NotoSansTelugu-Regular/train-*"}, {"split": "validation", "path": "NotoSansTelugu-Regular/validation-*"}, {"split": "test", "path": "NotoSansTelugu-Regular/test-*"}]}, {"config_name": "NotoSansTeluguUI-Bold", "data_files": [{"split": "train", "path": "NotoSansTeluguUI-Bold/train-*"}, {"split": "validation", "path": "NotoSansTeluguUI-Bold/validation-*"}, {"split": "test", "path": "NotoSansTeluguUI-Bold/test-*"}]}, {"config_name": "NotoSansTeluguUI-Regular", "data_files": [{"split": "train", "path": "NotoSansTeluguUI-Regular/train-*"}, {"split": "validation", "path": "NotoSansTeluguUI-Regular/validation-*"}, {"split": "test", "path": "NotoSansTeluguUI-Regular/test-*"}]}, {"config_name": "NotoSerifTelugu-VariableFont_wght", "data_files": [{"split": 
"train", "path": "NotoSerifTelugu-VariableFont_wght/train-*"}, {"split": "validation", "path": "NotoSerifTelugu-VariableFont_wght/validation-*"}, {"split": "test", "path": "NotoSerifTelugu-VariableFont_wght/test-*"}]}, {"config_name": "Peddana-Regular", "data_files": [{"split": "train", "path": "Peddana-Regular/train-*"}, {"split": "validation", "path": "Peddana-Regular/validation-*"}, {"split": "test", "path": "Peddana-Regular/test-*"}]}, {"config_name": "Ponnala", "data_files": [{"split": "train", "path": "Ponnala/train-*"}, {"split": "validation", "path": "Ponnala/validation-*"}, {"split": "test", "path": "Ponnala/test-*"}]}, {"config_name": "Pothana2000", "data_files": [{"split": "train", "path": "Pothana2000/train-*"}, {"split": "validation", "path": "Pothana2000/validation-*"}, {"split": "test", "path": "Pothana2000/test-*"}]}, {"config_name": "Pothana2001", "data_files": [{"split": "train", "path": "Pothana2001/train-*"}, {"split": "validation", "path": "Pothana2001/validation-*"}, {"split": "test", "path": "Pothana2001/test-*"}]}, {"config_name": "Ramabhadra", "data_files": [{"split": "train", "path": "Ramabhadra/train-*"}, {"split": "validation", "path": "Ramabhadra/validation-*"}, {"split": "test", "path": "Ramabhadra/test-*"}]}, {"config_name": "Ramabhadra1", "data_files": [{"split": "train", "path": "Ramabhadra1/train-*"}, {"split": "validation", "path": "Ramabhadra1/validation-*"}, {"split": "test", "path": "Ramabhadra1/test-*"}]}, {"config_name": "RamaneeyaWin", "data_files": [{"split": "train", "path": "RamaneeyaWin/train-*"}, {"split": "validation", "path": "RamaneeyaWin/validation-*"}, {"split": "test", "path": "RamaneeyaWin/test-*"}]}, {"config_name": "Ramaraja-Regular", "data_files": [{"split": "train", "path": "Ramaraja-Regular/train-*"}, {"split": "validation", "path": "Ramaraja-Regular/validation-*"}, {"split": "test", "path": "Ramaraja-Regular/test-*"}]}, {"config_name": "RaviPrakash", "data_files": [{"split": "train", "path": "RaviPrakash/train-*"}, {"split": "validation", "path": "RaviPrakash/validation-*"}, {"split": "test", "path": "RaviPrakash/test-*"}]}, {"config_name": "Sree Krushnadevaraya", "data_files": [{"split": "train", "path": "Sree Krushnadevaraya/train-*"}, {"split": "validation", "path": "Sree Krushnadevaraya/validation-*"}, {"split": "test", "path": "Sree Krushnadevaraya/test-*"}]}, {"config_name": "Subhadra", "data_files": [{"split": "train", "path": "Subhadra/train-*"}, {"split": "validation", "path": "Subhadra/validation-*"}, {"split": "test", "path": "Subhadra/test-*"}]}, {"config_name": "Suguna", "data_files": [{"split": "train", "path": "Suguna/train-*"}, {"split": "validation", "path": "Suguna/validation-*"}, {"split": "test", "path": "Suguna/test-*"}]}, {"config_name": "Suranna", "data_files": [{"split": "train", "path": "Suranna/train-*"}, {"split": "validation", "path": "Suranna/validation-*"}, {"split": "test", "path": "Suranna/test-*"}]}, {"config_name": "Suravara_Samhita", "data_files": [{"split": "train", "path": "Suravara_Samhita/train-*"}, {"split": "validation", "path": "Suravara_Samhita/validation-*"}, {"split": "test", "path": "Suravara_Samhita/test-*"}]}, {"config_name": "Suravara_Swarna", "data_files": [{"split": "train", "path": "Suravara_Swarna/train-*"}, {"split": "validation", "path": "Suravara_Swarna/validation-*"}, {"split": "test", "path": "Suravara_Swarna/test-*"}]}, {"config_name": "Suravara_Swarna_bold", "data_files": [{"split": "train", "path": "Suravara_Swarna_bold/train-*"}, {"split": "validation", "path": 
"Suravara_Swarna_bold/validation-*"}, {"split": "test", "path": "Suravara_Swarna_bold/test-*"}]}, {"config_name": "Suravara_Swarna_italic", "data_files": [{"split": "train", "path": "Suravara_Swarna_italic/train-*"}, {"split": "validation", "path": "Suravara_Swarna_italic/validation-*"}, {"split": "test", "path": "Suravara_Swarna_italic/test-*"}]}, {"config_name": "Suravaram", "data_files": [{"split": "train", "path": "Suravaram/train-*"}, {"split": "validation", "path": "Suravaram/validation-*"}, {"split": "test", "path": "Suravaram/test-*"}]}, {"config_name": "TLOTAmmaBI_ship", "data_files": [{"split": "train", "path": "TLOTAmmaBI_ship/train-*"}, {"split": "validation", "path": "TLOTAmmaBI_ship/validation-*"}, {"split": "test", "path": "TLOTAmmaBI_ship/test-*"}]}, {"config_name": "TLOTAmmaB_ship", "data_files": [{"split": "train", "path": "TLOTAmmaB_ship/train-*"}, {"split": "validation", "path": "TLOTAmmaB_ship/validation-*"}, {"split": "test", "path": "TLOTAmmaB_ship/test-*"}]}, {"config_name": "TLOTAmmaI_ship", "data_files": [{"split": "train", "path": "TLOTAmmaI_ship/train-*"}, {"split": "validation", "path": "TLOTAmmaI_ship/validation-*"}, {"split": "test", "path": "TLOTAmmaI_ship/test-*"}]}, {"config_name": "TLOTAmmaN_ship", "data_files": [{"split": "train", "path": "TLOTAmmaN_ship/train-*"}, {"split": "validation", "path": "TLOTAmmaN_ship/validation-*"}, {"split": "test", "path": "TLOTAmmaN_ship/test-*"}]}, {"config_name": "TLOTAmrutaBI_Ship", "data_files": [{"split": "train", "path": "TLOTAmrutaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTAmrutaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTAmrutaBI_Ship/test-*"}]}, {"config_name": "TLOTAmrutaB_Ship", "data_files": [{"split": "train", "path": "TLOTAmrutaB_Ship/train-*"}, {"split": "validation", "path": "TLOTAmrutaB_Ship/validation-*"}, {"split": "test", "path": "TLOTAmrutaB_Ship/test-*"}]}, {"config_name": "TLOTAtreyaBI_Ship", "data_files": [{"split": "train", "path": "TLOTAtreyaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTAtreyaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTAtreyaBI_Ship/test-*"}]}, {"config_name": "TLOTAtreyaB_Ship", "data_files": [{"split": "train", "path": "TLOTAtreyaB_Ship/train-*"}, {"split": "validation", "path": "TLOTAtreyaB_Ship/validation-*"}, {"split": "test", "path": "TLOTAtreyaB_Ship/test-*"}]}, {"config_name": "TLOTAtreyaI_Ship", "data_files": [{"split": "train", "path": "TLOTAtreyaI_Ship/train-*"}, {"split": "validation", "path": "TLOTAtreyaI_Ship/validation-*"}, {"split": "test", "path": "TLOTAtreyaI_Ship/test-*"}]}, {"config_name": "TLOTAtreyaN_Ship", "data_files": [{"split": "train", "path": "TLOTAtreyaN_Ship/train-*"}, {"split": "validation", "path": "TLOTAtreyaN_Ship/validation-*"}, {"split": "test", "path": "TLOTAtreyaN_Ship/test-*"}]}, {"config_name": "TLOTChandanaBI_Ship", "data_files": [{"split": "train", "path": "TLOTChandanaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTChandanaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTChandanaBI_Ship/test-*"}]}, {"config_name": "TLOTChandanaB_Ship", "data_files": [{"split": "train", "path": "TLOTChandanaB_Ship/train-*"}, {"split": "validation", "path": "TLOTChandanaB_Ship/validation-*"}, {"split": "test", "path": "TLOTChandanaB_Ship/test-*"}]}, {"config_name": "TLOTDevaI_Ship", "data_files": [{"split": "train", "path": "TLOTDevaI_Ship/train-*"}, {"split": "validation", "path": "TLOTDevaI_Ship/validation-*"}, {"split": "test", "path": "TLOTDevaI_Ship/test-*"}]}, {"config_name": "TLOTDevaN_Ship", 
"data_files": [{"split": "train", "path": "TLOTDevaN_Ship/train-*"}, {"split": "validation", "path": "TLOTDevaN_Ship/validation-*"}, {"split": "test", "path": "TLOTDevaN_Ship/test-*"}]}, {"config_name": "TLOTDraupadiBI_Ship", "data_files": [{"split": "train", "path": "TLOTDraupadiBI_Ship/train-*"}, {"split": "validation", "path": "TLOTDraupadiBI_Ship/validation-*"}, {"split": "test", "path": "TLOTDraupadiBI_Ship/test-*"}]}, {"config_name": "TLOTDraupadiB_ship", "data_files": [{"split": "train", "path": "TLOTDraupadiB_ship/train-*"}, {"split": "validation", "path": "TLOTDraupadiB_ship/validation-*"}, {"split": "test", "path": "TLOTDraupadiB_ship/test-*"}]}, {"config_name": "TLOTDraupadiI_Ship", "data_files": [{"split": "train", "path": "TLOTDraupadiI_Ship/train-*"}, {"split": "validation", "path": "TLOTDraupadiI_Ship/validation-*"}, {"split": "test", "path": "TLOTDraupadiI_Ship/test-*"}]}, {"config_name": "TLOTDraupadiN_Ship", "data_files": [{"split": "train", "path": "TLOTDraupadiN_Ship/train-*"}, {"split": "validation", "path": "TLOTDraupadiN_Ship/validation-*"}, {"split": "test", "path": "TLOTDraupadiN_Ship/test-*"}]}, {"config_name": "TLOTGolkondaBI_Ship", "data_files": [{"split": "train", "path": "TLOTGolkondaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTGolkondaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTGolkondaBI_Ship/test-*"}]}, {"config_name": "TLOTGolkondaB_Ship", "data_files": [{"split": "train", "path": "TLOTGolkondaB_Ship/train-*"}, {"split": "validation", "path": "TLOTGolkondaB_Ship/validation-*"}, {"split": "test", "path": "TLOTGolkondaB_Ship/test-*"}]}, {"config_name": "TLOTKrishnaB_Ship", "data_files": [{"split": "train", "path": "TLOTKrishnaB_Ship/train-*"}, {"split": "validation", "path": "TLOTKrishnaB_Ship/validation-*"}, {"split": "test", "path": "TLOTKrishnaB_Ship/test-*"}]}, {"config_name": "TLOTKrishnaI_Ship", "data_files": [{"split": "train", "path": "TLOTKrishnaI_Ship/train-*"}, {"split": "validation", "path": "TLOTKrishnaI_Ship/validation-*"}, {"split": "test", "path": "TLOTKrishnaI_Ship/test-*"}]}, {"config_name": "TLOTKrishnaN_Ship", "data_files": [{"split": "train", "path": "TLOTKrishnaN_Ship/train-*"}, {"split": "validation", "path": "TLOTKrishnaN_Ship/validation-*"}, {"split": "test", "path": "TLOTKrishnaN_Ship/test-*"}]}, {"config_name": "TLOTManuBI_Ship", "data_files": [{"split": "train", "path": "TLOTManuBI_Ship/train-*"}, {"split": "validation", "path": "TLOTManuBI_Ship/validation-*"}, {"split": "test", "path": "TLOTManuBI_Ship/test-*"}]}, {"config_name": "TLOTManuB_Ship", "data_files": [{"split": "train", "path": "TLOTManuB_Ship/train-*"}, {"split": "validation", "path": "TLOTManuB_Ship/validation-*"}, {"split": "test", "path": "TLOTManuB_Ship/test-*"}]}, {"config_name": "TLOTManuI_Ship", "data_files": [{"split": "train", "path": "TLOTManuI_Ship/train-*"}, {"split": "validation", "path": "TLOTManuI_Ship/validation-*"}, {"split": "test", "path": "TLOTManuI_Ship/test-*"}]}, {"config_name": "TLOTManuN_Ship", "data_files": [{"split": "train", "path": "TLOTManuN_Ship/train-*"}, {"split": "validation", "path": "TLOTManuN_Ship/validation-*"}, {"split": "test", "path": "TLOTManuN_Ship/test-*"}]}, {"config_name": "TLOTMenakaBI_Ship", "data_files": [{"split": "train", "path": "TLOTMenakaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTMenakaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTMenakaBI_Ship/test-*"}]}, {"config_name": "TLOTMenakaB_Ship", "data_files": [{"split": "train", "path": "TLOTMenakaB_Ship/train-*"}, {"split": 
"validation", "path": "TLOTMenakaB_Ship/validation-*"}, {"split": "test", "path": "TLOTMenakaB_Ship/test-*"}]}, {"config_name": "TLOTMenakaI_Ship", "data_files": [{"split": "train", "path": "TLOTMenakaI_Ship/train-*"}, {"split": "validation", "path": "TLOTMenakaI_Ship/validation-*"}, {"split": "test", "path": "TLOTMenakaI_Ship/test-*"}]}, {"config_name": "TLOTMenakaN_Ship", "data_files": [{"split": "train", "path": "TLOTMenakaN_Ship/train-*"}, {"split": "validation", "path": "TLOTMenakaN_Ship/validation-*"}, {"split": "test", "path": "TLOTMenakaN_Ship/test-*"}]}, {"config_name": "TLOTPavaniBI_Ship", "data_files": [{"split": "train", "path": "TLOTPavaniBI_Ship/train-*"}, {"split": "validation", "path": "TLOTPavaniBI_Ship/validation-*"}, {"split": "test", "path": "TLOTPavaniBI_Ship/test-*"}]}, {"config_name": "TLOTPavaniB_Ship", "data_files": [{"split": "train", "path": "TLOTPavaniB_Ship/train-*"}, {"split": "validation", "path": "TLOTPavaniB_Ship/validation-*"}, {"split": "test", "path": "TLOTPavaniB_Ship/test-*"}]}, {"config_name": "TLOTPriyaBI_Ship", "data_files": [{"split": "train", "path": "TLOTPriyaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTPriyaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTPriyaBI_Ship/test-*"}]}, {"config_name": "TLOTPriyaB_Ship", "data_files": [{"split": "train", "path": "TLOTPriyaB_Ship/train-*"}, {"split": "validation", "path": "TLOTPriyaB_Ship/validation-*"}, {"split": "test", "path": "TLOTPriyaB_Ship/test-*"}]}, {"config_name": "TLOTRajanBI_Ship", "data_files": [{"split": "train", "path": "TLOTRajanBI_Ship/train-*"}, {"split": "validation", "path": "TLOTRajanBI_Ship/validation-*"}, {"split": "test", "path": "TLOTRajanBI_Ship/test-*"}]}, {"config_name": "TLOTRajanB_Ship", "data_files": [{"split": "train", "path": "TLOTRajanB_Ship/train-*"}, {"split": "validation", "path": "TLOTRajanB_Ship/validation-*"}, {"split": "test", "path": "TLOTRajanB_Ship/test-*"}]}, {"config_name": "TLOTRajaniBI_Ship", "data_files": [{"split": "train", "path": "TLOTRajaniBI_Ship/train-*"}, {"split": "validation", "path": "TLOTRajaniBI_Ship/validation-*"}, {"split": "test", "path": "TLOTRajaniBI_Ship/test-*"}]}, {"config_name": "TLOTRajaniB_Ship", "data_files": [{"split": "train", "path": "TLOTRajaniB_Ship/train-*"}, {"split": "validation", "path": "TLOTRajaniB_Ship/validation-*"}, {"split": "test", "path": "TLOTRajaniB_Ship/test-*"}]}, {"config_name": "TLOTSanjanaBI_Ship", "data_files": [{"split": "train", "path": "TLOTSanjanaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTSanjanaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTSanjanaBI_Ship/test-*"}]}, {"config_name": "TLOTSanjanaB_Ship", "data_files": [{"split": "train", "path": "TLOTSanjanaB_Ship/train-*"}, {"split": "validation", "path": "TLOTSanjanaB_Ship/validation-*"}, {"split": "test", "path": "TLOTSanjanaB_Ship/test-*"}]}, {"config_name": "TLOTSitaraBI_Ship", "data_files": [{"split": "train", "path": "TLOTSitaraBI_Ship/train-*"}, {"split": "validation", "path": "TLOTSitaraBI_Ship/validation-*"}, {"split": "test", "path": "TLOTSitaraBI_Ship/test-*"}]}, {"config_name": "TLOTSitaraB_Ship", "data_files": [{"split": "train", "path": "TLOTSitaraB_Ship/train-*"}, {"split": "validation", "path": "TLOTSitaraB_Ship/validation-*"}, {"split": "test", "path": "TLOTSitaraB_Ship/test-*"}]}, {"config_name": "TLOTSwamiB", "data_files": [{"split": "train", "path": "TLOTSwamiB/train-*"}, {"split": "validation", "path": "TLOTSwamiB/validation-*"}, {"split": "test", "path": "TLOTSwamiB/test-*"}]}, {"config_name": 
"TLOTSwamiBI_Ship", "data_files": [{"split": "train", "path": "TLOTSwamiBI_Ship/train-*"}, {"split": "validation", "path": "TLOTSwamiBI_Ship/validation-*"}, {"split": "test", "path": "TLOTSwamiBI_Ship/test-*"}]}, {"config_name": "TLOTSwamiB_Ship", "data_files": [{"split": "train", "path": "TLOTSwamiB_Ship/train-*"}, {"split": "validation", "path": "TLOTSwamiB_Ship/validation-*"}, {"split": "test", "path": "TLOTSwamiB_Ship/test-*"}]}, {"config_name": "TLOTVennela1B_Ship", "data_files": [{"split": "train", "path": "TLOTVennela1B_Ship/train-*"}, {"split": "validation", "path": "TLOTVennela1B_Ship/validation-*"}, {"split": "test", "path": "TLOTVennela1B_Ship/test-*"}]}, {"config_name": "TLOTVennelaBI_Ship", "data_files": [{"split": "train", "path": "TLOTVennelaBI_Ship/train-*"}, {"split": "validation", "path": "TLOTVennelaBI_Ship/validation-*"}, {"split": "test", "path": "TLOTVennelaBI_Ship/test-*"}]}, {"config_name": "TLOTVennelaI_Ship", "data_files": [{"split": "train", "path": "TLOTVennelaI_Ship/train-*"}, {"split": "validation", "path": "TLOTVennelaI_Ship/validation-*"}, {"split": "test", "path": "TLOTVennelaI_Ship/test-*"}]}, {"config_name": "TLOTVennelaN_Ship", "data_files": [{"split": "train", "path": "TLOTVennelaN_Ship/train-*"}, {"split": "validation", "path": "TLOTVennelaN_Ship/validation-*"}, {"split": "test", "path": "TLOTVennelaN_Ship/test-*"}]}, {"config_name": "TenaliRamakrishna-Regular", "data_files": [{"split": "train", "path": "TenaliRamakrishna-Regular/train-*"}, {"split": "validation", "path": "TenaliRamakrishna-Regular/validation-*"}, {"split": "test", "path": "TenaliRamakrishna-Regular/test-*"}]}, {"config_name": "Tikkana", "data_files": [{"split": "train", "path": "Tikkana/train-*"}, {"split": "validation", "path": "Tikkana/validation-*"}, {"split": "test", "path": "Tikkana/test-*"}]}, {"config_name": "TimmanaRegular", "data_files": [{"split": "train", "path": "TimmanaRegular/train-*"}, {"split": "validation", "path": "TimmanaRegular/validation-*"}, {"split": "test", "path": "TimmanaRegular/test-*"}]}, {"config_name": "Vajram", "data_files": [{"split": "train", "path": "Vajram/train-*"}, {"split": "validation", "path": "Vajram/validation-*"}, {"split": "test", "path": "Vajram/test-*"}]}, {"config_name": "Vani", "data_files": [{"split": "train", "path": "Vani/train-*"}, {"split": "validation", "path": "Vani/validation-*"}, {"split": "test", "path": "Vani/test-*"}]}, {"config_name": "Vanib", "data_files": [{"split": "train", "path": "Vanib/train-*"}, {"split": "validation", "path": "Vanib/validation-*"}, {"split": "test", "path": "Vanib/test-*"}]}, {"config_name": "Vemana", "data_files": [{"split": "train", "path": "Vemana/train-*"}, {"split": "validation", "path": "Vemana/validation-*"}, {"split": "test", "path": "Vemana/test-*"}]}, {"config_name": "akshar", "data_files": [{"split": "train", "path": "akshar/train-*"}, {"split": "validation", "path": "akshar/validation-*"}, {"split": "test", "path": "akshar/test-*"}]}, {"config_name": "gautami", "data_files": [{"split": "train", "path": "gautami/train-*"}, {"split": "validation", "path": "gautami/validation-*"}, {"split": "test", "path": "gautami/test-*"}]}, {"config_name": "gautamib", "data_files": [{"split": "train", "path": "gautamib/train-*"}, {"split": "validation", "path": "gautamib/validation-*"}, {"split": "test", "path": "gautamib/test-*"}]}, {"config_name": "lohit_te", "data_files": [{"split": "train", "path": "lohit_te/train-*"}, {"split": "validation", "path": "lohit_te/validation-*"}, {"split": "test", "path": 
"lohit_te/test-*"}]}]}
2023-12-15T23:32:02+00:00
[]
[ "te" ]
TAGS #task_categories-image-to-text #size_categories-1M<n<10M #language-Telugu #license-gpl-3.0 #region-us
# Dataset Card for Dataset Name ## Dataset Details ### Dataset Description This extensive dataset, hosted on Huggingface, is a comprehensive resource for Optical Character Recognition (OCR) in the Telugu language, featuring an impressive array of 90+ configurations. Each configuration in this dataset corresponds to a unique font, meticulously curated by Dr. Rakesh Achanta and sourced from his GitHub repository (URL). The dataset is specifically designed to support and enhance the development of OCR models, ranging from simple Convolutional Recurrent Neural Network (CRNN) architectures to more advanced systems like trOCR. The versatility of this dataset lies in its large volume and diversity, making it an ideal choice for researchers and developers aiming to build robust OCR systems for the Telugu script. Key Features: - Font Diversity: Over 90 unique fonts, each forming a separate configuration, providing a rich variety in text styles and nuances. - Large Volume: Each configuration contains approximately 800,000 examples, summing up to a vast pool of data for comprehensive training and evaluation. - Data Split: The dataset is pre-split into training, validation, and test sets, following a 60/20/20 ratio, to facilitate efficient model training and benchmarking. - Use Cases: Ideal for developing a wide range of OCR models - from basic CRNNs to sophisticated models like trOCR. - Accessibility: Hosted on Huggingface, ensuring easy access and integration with various machine learning frameworks and tools. This dataset stands as a testament to Dr. Rakesh Achanta's dedication to enhancing Telugu language processing technologies. It is not just a tool for model development but also a gateway to preserving and digitizing the rich literary heritage of the Telugu language. Researchers and developers leveraging this dataset are encouraged to adhere to the ethical guidelines of AI research and development, ensuring that the applications developed are for the benefit of language preservation, accessibility, and technological advancement in a responsible manner. - Fonts Curated by: Dr. Rakesh Achanta - Shared by: Krishna Sriharsha Gundu - Language(s) (NLP): Telugu ### Dataset Sources - Repository: Original Books Dataset
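To make the multi-configuration layout concrete, here is a minimal loading sketch. The repository id is hypothetical (substitute the actual Hub path of this dataset); the config name `Dhurjati` and the train/validation/test split names are taken from the configuration metadata above.

```python
from datasets import get_dataset_config_names, load_dataset

# Hypothetical repo id -- replace with the actual Hub path of this dataset.
REPO_ID = "your-username/telugu-ocr-fonts"

# Each of the 90+ fonts is a separate config (e.g. "Dhurjati", "gautami",
# "lohit_te"); the names can be enumerated without downloading any data.
configs = get_dataset_config_names(REPO_ID)
print(len(configs), configs[:5])

# Load a single font config; it ships with the 60/20/20
# train/validation/test split described above.
ds = load_dataset(REPO_ID, "Dhurjati")
example = ds["train"][0]
example["image"].save("dhurjati_sample.png")  # PIL image of the rendered text
print(example["text"])                        # the Telugu string shown in the image
```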
[ "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\nThis extensive dataset, hosted on Huggingface, is a comprehensive resource for Optical Character Recognition (OCR) in the Telugu language, featuring an impressive array of 90+ configurations. Each configuration in this dataset corresponds to a unique font, meticulously curated by Dr. Rakesh Achanta and sourced from his GitHub repository (URL\n\nThe dataset is specifically designed to support and enhance the development of OCR models, ranging from simple Convolutional Recurrent Neural Network (CRNN) architectures to more advanced systems like trOCR. The versatility of this dataset lies in its large volume and diversity, making it an ideal choice for researchers and developers aiming to build robust OCR systems for the Telugu script.\n\nKey Features:\n\n - Font Diversity: Over 90 unique fonts, each forming a separate configuration, providing a rich variety in text styles and nuances.\n - Large Volume: Each configuration contains approximately 800,000 examples, summing up to a vast pool of data for comprehensive training and evaluation.\n - Data Split: The dataset is pre-split into training, validation, and test sets, following a 60/20/20 ratio, to facilitate efficient model training and benchmarking.\n - Use Cases: Ideal for developing a wide range of OCR models - from basic CRNNs to sophisticated models like trOCR.\n - Accessibility: Hosted on Huggingface, ensuring easy access and integration with various machine learning frameworks and tools.\n\nThis dataset stands as a testament to Dr. Rakesh Achanta's dedication to enhancing Telugu language processing technologies. It is not just a tool for model development but also a gateway to preserving and digitizing the rich literary heritage of the Telugu language.\n\nResearchers and developers leveraging this dataset are encouraged to adhere to the ethical guidelines of AI research and development, ensuring that the applications developed are for the benefit of language preservation, accessibility, and technological advancement in a responsible manner.\n\n\n\n- Fonts Curated by: Dr. Rakesh Achanta\n- Shared by: Krishna Sriharsha Gundu\n- Language(s) (NLP): Telugu", "### Dataset Sources [optional]\n\n\n\n- Repository: Original Books Dataset" ]
[ "TAGS\n#task_categories-image-to-text #size_categories-1M<n<10M #language-Telugu #license-gpl-3.0 #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\nThis extensive dataset, hosted on Huggingface, is a comprehensive resource for Optical Character Recognition (OCR) in the Telugu language, featuring an impressive array of 90+ configurations. Each configuration in this dataset corresponds to a unique font, meticulously curated by Dr. Rakesh Achanta and sourced from his GitHub repository (URL\n\nThe dataset is specifically designed to support and enhance the development of OCR models, ranging from simple Convolutional Recurrent Neural Network (CRNN) architectures to more advanced systems like trOCR. The versatility of this dataset lies in its large volume and diversity, making it an ideal choice for researchers and developers aiming to build robust OCR systems for the Telugu script.\n\nKey Features:\n\n - Font Diversity: Over 90 unique fonts, each forming a separate configuration, providing a rich variety in text styles and nuances.\n - Large Volume: Each configuration contains approximately 800,000 examples, summing up to a vast pool of data for comprehensive training and evaluation.\n - Data Split: The dataset is pre-split into training, validation, and test sets, following a 60/20/20 ratio, to facilitate efficient model training and benchmarking.\n - Use Cases: Ideal for developing a wide range of OCR models - from basic CRNNs to sophisticated models like trOCR.\n - Accessibility: Hosted on Huggingface, ensuring easy access and integration with various machine learning frameworks and tools.\n\nThis dataset stands as a testament to Dr. Rakesh Achanta's dedication to enhancing Telugu language processing technologies. It is not just a tool for model development but also a gateway to preserving and digitizing the rich literary heritage of the Telugu language.\n\nResearchers and developers leveraging this dataset are encouraged to adhere to the ethical guidelines of AI research and development, ensuring that the applications developed are for the benefit of language preservation, accessibility, and technological advancement in a responsible manner.\n\n\n\n- Fonts Curated by: Dr. Rakesh Achanta\n- Shared by: Krishna Sriharsha Gundu\n- Language(s) (NLP): Telugu", "### Dataset Sources [optional]\n\n\n\n- Repository: Original Books Dataset" ]
[ 42, 8, 4, 494, 19 ]
[ "passage: TAGS\n#task_categories-image-to-text #size_categories-1M<n<10M #language-Telugu #license-gpl-3.0 #region-us \n# Dataset Card for Dataset Name## Dataset Details" ]
1e2ac223250fc7a93b61b365b8c3195650d14e11
# Dataset Card for end2end_textclassification_with_metadata This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("argilla/end2end_textclassification_with_metadata") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("argilla/end2end_textclassification_with_metadata") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**. The **fields** are the dataset records themselves; for the moment, only text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | text | Text | FieldTypes.text | True | False | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | label | Label | QuestionTypes.label_selection | True | N/A | ['World', 'Sports', 'Business', 'Sci/Tech'] | The **suggestions** are human- or machine-generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named by appending "-suggestion" and "-suggestion-metadata" to those, containing the value(s) of the suggestion and its metadata, respectively. 
Accordingly, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators. For example, you can use this to provide a link to the original source of the dataset record, or details about the record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. | Metadata Name | Title | Type | Values | Visible for Annotators | | ------------- | ----- | ---- | ------ | ---------------------- | | group | Annotation Group | terms | ['group-1', 'group-2', 'group-3'] | True | | length | Length of the text | integer | 100 - 862 | True | | length_std | Standard deviation of the length of the text | float | 139.096 - 361.398 | True | The **guidelines** are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": "record-0", "fields": { "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again." }, "metadata": { "group": "group-2", "length": 144, "length_std": 144.0 }, "responses": [], "suggestions": [], "vectors": {} } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "external_id": "record-0", "label": [], "label-suggestion": null, "label-suggestion-metadata": { "agent": null, "score": null, "type": null }, "metadata": "{\"group\": \"group-2\", \"length\": 144, \"length_std\": 144.0}", "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again." } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves; for the moment, only text fields are supported. These are the ones that will be used to provide responses to the questions. * **text** is of type `FieldTypes.text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **label** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. * (optional) **label-suggestion** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. 
Additionally, there are two more optional fields: * **metadata:** This field can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators. For example, you can use this to provide a link to the original source of the record, or details such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines Classify the articles into one of the four categories. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
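As a small usage note for the record layout shown in the Data Instances section above: in the HuggingFace `datasets` view the `metadata` column arrives as a JSON-encoded string, so it needs to be parsed back into a dictionary before use. The sketch below assumes only the column layout documented in this card.

```python
import json

from datasets import load_dataset

ds = load_dataset("argilla/end2end_textclassification_with_metadata", split="train")

record = ds[0]
# In the HuggingFace view the metadata is a JSON string, e.g.
# '{"group": "group-2", "length": 144, "length_std": 144.0}'.
meta = json.loads(record["metadata"])
print(meta["group"], meta["length"], meta["length_std"])

# Example: keep only the records assigned to one annotation group.
group_1 = ds.filter(lambda r: json.loads(r["metadata"])["group"] == "group-1")
print(len(group_1))
```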
argilla/end2end_textclassification_with_metadata
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-29T14:38:01+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2024-02-13T00:57:31+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for end2end\_textclassification\_with\_metadata ============================================================ This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + text is of type 'URL'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + label is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) label-suggestion is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. Additionally, we also have two more fields that are optional and are the following: * metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines Classify the articles into one of the four categories. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. 
These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 404, 40, 508, 27, 7, 4, 10, 10, 5, 17, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
a1cedfdf2d4c036f3d5da07f6586bf54806e4b2f
# Dataset Card for end2end_textclassification_with_vectors

This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).

## Dataset Description

- **Homepage:** https://argilla.io
- **Repository:** https://github.com/argilla-io/argilla
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

This dataset contains:

* A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla.

* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`.

* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.

### Load with Argilla

To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:

```python
import argilla as rg

ds = rg.FeedbackDataset.from_huggingface("argilla/end2end_textclassification_with_vectors")
```

### Load with `datasets`

To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:

```python
from datasets import load_dataset

ds = load_dataset("argilla/end2end_textclassification_with_vectors")
```

### Supported Tasks and Leaderboards

This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset), so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure).

There are no leaderboards associated with this dataset.

### Languages

[More Information Needed]

## Dataset Structure

### Data in Argilla

The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.

The **fields** are the dataset records themselves; for the moment, just text fields are supported. These are the ones that will be used to provide responses to the questions.

| Field Name | Title | Type | Required | Markdown |
| ---------- | ----- | ---- | -------- | -------- |
| text | Text | FieldTypes.text | True | False |


The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.

| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
| label | Label | QuestionTypes.label_selection | True | N/A | ['World', 'Sports', 'Business', 'Sci/Tech'] |


The **suggestions** are human- or machine-generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named by appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively.
Thus, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata".

The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`.

**✨ NEW** The **vectors** are additional columns that contain a floating-point vector, constrained to the dimensions pre-defined in the **vectors_settings** when configuring the vectors within the dataset itself; the vectors will always be 1-dimensional. The **vectors** are optional and identified by the pre-defined vector name in the dataset configuration file in `argilla.yaml`.

| Vector Name | Title | Dimensions |
|-------------|-------|------------|
| sentence_embedding | Sentence Embedding | [1, 384] |


| Metadata Name | Title | Type | Values | Visible for Annotators |
| ------------- | ----- | ---- | ------ | ---------------------- |


The **guidelines** are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section.

### Data Instances

An example of a dataset instance in Argilla looks as follows:

```json
{
    "external_id": "record-0",
    "fields": {
        "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again."
}, "metadata": {}, "responses": [], "suggestions": [], "vectors": { "sentence_embedding": [ -0.12043723464012146, -0.20540212094783783, -0.0035010750871151686, -0.00980630237609148, 0.1692187786102295, 0.1105300635099411, -0.5263048410415649, 0.06751782447099686, 0.4591887891292572, 0.12288972735404968, -0.04069870337843895, 0.14249269664287567, -0.4193352162837982, 0.3089897334575653, -0.002920431550592184, -0.3318617343902588, 0.4440504312515259, 0.0024764006957411766, -0.5200819373130798, 0.4191458225250244, -0.2874358892440796, -0.31848442554473877, 0.46103376150131226, -0.2150607705116272, 0.4638316333293915, -0.5393298864364624, 0.11619565635919571, 0.12209033966064453, -0.5150607228279114, -1.5762522220611572, -0.3548111319541931, -0.3022597134113312, -0.21581022441387177, -0.04001490771770477, 0.3795183300971985, -0.4639199376106262, -0.18638719618320465, 0.34496206045150757, 0.19537486135959625, 0.464184045791626, 0.1232186034321785, 0.16397443413734436, -0.20278260111808777, -0.1903568059206009, 0.06815202534198761, -0.3819412291049957, 0.26536446809768677, -0.13817478716373444, 0.14375346899032593, -0.09447652846574783, 0.2094372808933258, -0.02289251796901226, 0.41208600997924805, 0.1988988220691681, 0.14554573595523834, 0.27547740936279297, 0.5047193169593811, 0.1193053349852562, 0.1329369843006134, 0.008757795207202435, 0.5858960747718811, 0.10716624557971954, -1.2744114398956299, -0.015607771463692188, 0.4341447949409485, -0.18675705790519714, 0.0012254089815542102, 0.12185507267713547, -0.46970999240875244, 0.2139795422554016, 0.08228280395269394, 0.02936917170882225, -0.12386734038591385, 0.1562434434890747, 0.17051148414611816, -0.2713660001754761, 0.038224536925554276, -0.45389631390571594, -0.31998828053474426, -0.11758271604776382, -0.13718140125274658, -0.33116868138313293, -0.4639136493206024, -0.4220881164073944, -0.03149694949388504, -0.13691803812980652, -0.05776222050189972, 0.041543979197740555, 0.5278537273406982, 0.4890863299369812, -0.07661459594964981, -0.18313054740428925, 0.21626459062099457, -0.11445435136556625, -0.9753242135047913, -0.4595167636871338, 0.34693828225135803, 0.2784789502620697, 0.15612299740314484, 0.8795897364616394, -0.208233043551445, 0.25665318965911865, 0.664867103099823, 0.10006468743085861, 0.3717508912086487, -0.19154797494411469, -0.044070228934288025, 0.19016152620315552, -0.43286705017089844, 0.13931529223918915, 0.16061720252037048, -0.14001505076885223, 0.2723529040813446, 0.010507846251130104, -0.043816640973091125, -0.3933525085449219, 0.07633335143327713, 0.16074138879776, 0.25545138120651245, 0.10411184281110764, -0.252656489610672, -0.014079650864005089, 0.6464551687240601, -0.5342252850532532, 0.13398316502571106, -0.19860437512397766, -0.04238532856106758, 0.7170720100402832, 0.20766960084438324, 0.3631458282470703, 0.3689534068107605, -0.637426495552063, 0.04991120100021362, -0.24049770832061768, -0.4963647425174713, -0.18716897070407867, -0.057773757725954056, -0.027187222614884377, 0.3510451018810272, -0.13376864790916443, -0.5133410692214966, -0.41071003675460815, -0.25056102871894836, -0.7314324378967285, -0.023162752389907837, -0.2013019472360611, -0.32370084524154663, 0.282392680644989, -0.13820968568325043, 0.3375529646873474, -0.3710899353027344, 0.26835963129997253, -0.11854378879070282, -0.29100847244262695, -0.07820427417755127, 0.41742604970932007, 0.18278738856315613, 0.8749655485153198, -0.12474101781845093, 0.21646760404109955, 0.5324461460113525, 0.01801791973412037, -0.41473129391670227, 
0.3448951542377472, 0.31545397639274597, -0.06652028113603592, -0.12838466465473175, 0.07816355675458908, -0.06292332708835602, -0.04791972413659096, -0.06891685724258423, 0.6116467714309692, 0.20266728103160858, -0.20389209687709808, 0.4481717646121979, -0.14994290471076965, -0.8113712072372437, -0.8815931081771851, -0.44949814677238464, 0.2717459797859192, 0.08912745118141174, -0.5202239155769348, -0.597383975982666, 0.3199369013309479, -0.10234680771827698, -0.35641443729400635, -0.240205317735672, 0.12303880602121353, 0.3744034767150879, 0.4259321987628937, 0.07845962047576904, 0.020776493474841118, -0.22882433235645294, 0.13580550253391266, 0.40983065962791443, 0.16749930381774902, -0.49101221561431885, -0.20313602685928345, 0.18159261345863342, -0.24688513576984406, 0.17602556943893433, -0.8417962193489075, -0.33050695061683655, 0.08187534660100937, 0.35799700021743774, -0.22269625961780548, 0.026066116988658905, 0.03357049077749252, 0.34546494483947754, -0.09888196736574173, 0.3313341736793518, 0.35131382942199707, 0.4556196331977844, -0.19695612788200378, 0.1761651486158371, -0.6264569759368896, -0.07231508940458298, 0.10336003452539444, 0.07752178609371185, 0.5530408620834351, 0.01386663131415844, 0.7373855113983154, -0.36467820405960083, -1.450913906097412, -0.08285603672266006, -0.25894221663475037, -0.3406654894351959, 0.11599279940128326, -0.08816834539175034, 0.05016550049185753, 0.10680476576089859, 0.7345742583274841, 0.5545358657836914, 0.03725026920437813, -0.013942588120698929, 0.15905942022800446, 0.10697169601917267, -0.1159278005361557, -0.14520259201526642, -0.11516434699296951, 0.009737834334373474, -0.21393126249313354, 0.2254694700241089, -0.26198986172676086, 0.38086384534835815, -0.45145368576049805, -0.03374387323856354, 0.7591500878334045, -0.044888246804475784, 1.1086310148239136, 0.6878536343574524, 0.11878693848848343, -0.23152923583984375, -0.2290886491537094, 0.5528044104576111, -0.1958092898130417, -0.7428138852119446, 0.4733226001262665, 0.6412751078605652, 0.5031447410583496, 0.016923299059271812, -0.18432317674160004, 0.35899147391319275, -0.11351587623357773, 0.2660171687602997, 0.15020841360092163, -0.22641459107398987, -0.025799907743930817, -0.06872990727424622, 0.547649621963501, 0.025877539068460464, -0.15108099579811096, 0.1667388379573822, 0.022908709943294525, -0.19313660264015198, 0.24992817640304565, 0.344704270362854, 0.5190809369087219, -0.11737774312496185, -0.2888471186161041, -0.07916659861803055, 0.2710835635662079, -0.006746355444192886, 0.36876654624938965, -0.18921276926994324, 0.09448281675577164, -0.18097177147865295, 0.1651483029127121, 0.06502965092658997, -0.06709907203912735, -0.1526907980442047, -0.007644282653927803, 0.16943910717964172, -0.21555569767951965, 0.7630714774131775, 0.016727717593312263, -0.5200330018997192, -0.0036530615761876106, 0.09650308638811111, 0.2635899782180786, -0.3506390154361725, -0.3254963159561157, -0.34183427691459656, 0.31788069009780884, -0.12417469918727875, 0.455859899520874, -0.08290059864521027, -0.015282154083251953, 0.1094369888305664, 0.26194971799850464, -0.6269403100013733, 0.43764355778694153, -0.11381351947784424, 0.1532089114189148, -0.05476648733019829, -0.6155551075935364, -0.26819321513175964, 0.4747026562690735, -0.21404141187667847, -1.6814441680908203, -0.33003154397010803, -0.06825298070907593, 0.16378769278526306, -0.186134472489357, 0.17319530248641968, 0.04992658272385597, 0.4345815181732178, -0.2896181643009186, 0.3351740539073944, -0.12574052810668945, 
0.3193739354610443, 0.07733222097158432, 0.0655708909034729, 0.07784054428339005, -0.08181453496217728, 0.4951131343841553, -0.2834014296531677, 0.17430414259433746, 0.11342763900756836, 0.13239474594593048, -0.09465833008289337, 1.4634194374084473, -0.014428001828491688, 0.2063509076833725, 0.18044200539588928, -0.03166770190000534, 0.04698008671402931, -0.0871230885386467, 0.29907214641571045, 0.6440877914428711, -0.12226957082748413, -0.035559624433517456, -0.27108025550842285, -0.03263016417622566, 0.2912079989910126, -0.06217412278056145, -0.05635988339781761, 0.48305007815361023, -0.2926708459854126, 0.19818736612796783, 0.2689226567745209, -0.3818262219429016, 0.09530964493751526, 0.4883849024772644, -0.46904870867729187, -0.40877559781074524, -0.2623692452907562, 0.343670129776001, 0.08838459104299545, -0.04895532876253128, 0.12374666333198547, -0.21999061107635498, -0.15163420140743256, -0.00264864438213408, 0.016250310465693474, -0.4864921569824219, -0.337839812040329, 0.03290683031082153, -0.4574444591999054, -0.41137075424194336, -0.35722532868385315, -0.7601121068000793, -0.45991116762161255, 0.3345332145690918 ] } } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "external_id": "record-0", "label": [], "label-suggestion": null, "label-suggestion-metadata": { "agent": null, "score": null, "type": null }, "metadata": "{}", "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again.", "vectors": { "sentence_embedding": [ -0.12043723464012146, -0.20540212094783783, -0.0035010750871151686, -0.00980630237609148, 0.1692187786102295, 0.1105300635099411, -0.5263048410415649, 0.06751782447099686, 0.4591887891292572, 0.12288972735404968, -0.04069870337843895, 0.14249269664287567, -0.4193352162837982, 0.3089897334575653, -0.002920431550592184, -0.3318617343902588, 0.4440504312515259, 0.0024764006957411766, -0.5200819373130798, 0.4191458225250244, -0.2874358892440796, -0.31848442554473877, 0.46103376150131226, -0.2150607705116272, 0.4638316333293915, -0.5393298864364624, 0.11619565635919571, 0.12209033966064453, -0.5150607228279114, -1.5762522220611572, -0.3548111319541931, -0.3022597134113312, -0.21581022441387177, -0.04001490771770477, 0.3795183300971985, -0.4639199376106262, -0.18638719618320465, 0.34496206045150757, 0.19537486135959625, 0.464184045791626, 0.1232186034321785, 0.16397443413734436, -0.20278260111808777, -0.1903568059206009, 0.06815202534198761, -0.3819412291049957, 0.26536446809768677, -0.13817478716373444, 0.14375346899032593, -0.09447652846574783, 0.2094372808933258, -0.02289251796901226, 0.41208600997924805, 0.1988988220691681, 0.14554573595523834, 0.27547740936279297, 0.5047193169593811, 0.1193053349852562, 0.1329369843006134, 0.008757795207202435, 0.5858960747718811, 0.10716624557971954, -1.2744114398956299, -0.015607771463692188, 0.4341447949409485, -0.18675705790519714, 0.0012254089815542102, 0.12185507267713547, -0.46970999240875244, 0.2139795422554016, 0.08228280395269394, 0.02936917170882225, -0.12386734038591385, 0.1562434434890747, 0.17051148414611816, -0.2713660001754761, 0.038224536925554276, -0.45389631390571594, -0.31998828053474426, -0.11758271604776382, -0.13718140125274658, -0.33116868138313293, -0.4639136493206024, -0.4220881164073944, -0.03149694949388504, -0.13691803812980652, -0.05776222050189972, 0.041543979197740555, 0.5278537273406982, 0.4890863299369812, -0.07661459594964981, -0.18313054740428925, 
0.21626459062099457, -0.11445435136556625, -0.9753242135047913, -0.4595167636871338, 0.34693828225135803, 0.2784789502620697, 0.15612299740314484, 0.8795897364616394, -0.208233043551445, 0.25665318965911865, 0.664867103099823, 0.10006468743085861, 0.3717508912086487, -0.19154797494411469, -0.044070228934288025, 0.19016152620315552, -0.43286705017089844, 0.13931529223918915, 0.16061720252037048, -0.14001505076885223, 0.2723529040813446, 0.010507846251130104, -0.043816640973091125, -0.3933525085449219, 0.07633335143327713, 0.16074138879776, 0.25545138120651245, 0.10411184281110764, -0.252656489610672, -0.014079650864005089, 0.6464551687240601, -0.5342252850532532, 0.13398316502571106, -0.19860437512397766, -0.04238532856106758, 0.7170720100402832, 0.20766960084438324, 0.3631458282470703, 0.3689534068107605, -0.637426495552063, 0.04991120100021362, -0.24049770832061768, -0.4963647425174713, -0.18716897070407867, -0.057773757725954056, -0.027187222614884377, 0.3510451018810272, -0.13376864790916443, -0.5133410692214966, -0.41071003675460815, -0.25056102871894836, -0.7314324378967285, -0.023162752389907837, -0.2013019472360611, -0.32370084524154663, 0.282392680644989, -0.13820968568325043, 0.3375529646873474, -0.3710899353027344, 0.26835963129997253, -0.11854378879070282, -0.29100847244262695, -0.07820427417755127, 0.41742604970932007, 0.18278738856315613, 0.8749655485153198, -0.12474101781845093, 0.21646760404109955, 0.5324461460113525, 0.01801791973412037, -0.41473129391670227, 0.3448951542377472, 0.31545397639274597, -0.06652028113603592, -0.12838466465473175, 0.07816355675458908, -0.06292332708835602, -0.04791972413659096, -0.06891685724258423, 0.6116467714309692, 0.20266728103160858, -0.20389209687709808, 0.4481717646121979, -0.14994290471076965, -0.8113712072372437, -0.8815931081771851, -0.44949814677238464, 0.2717459797859192, 0.08912745118141174, -0.5202239155769348, -0.597383975982666, 0.3199369013309479, -0.10234680771827698, -0.35641443729400635, -0.240205317735672, 0.12303880602121353, 0.3744034767150879, 0.4259321987628937, 0.07845962047576904, 0.020776493474841118, -0.22882433235645294, 0.13580550253391266, 0.40983065962791443, 0.16749930381774902, -0.49101221561431885, -0.20313602685928345, 0.18159261345863342, -0.24688513576984406, 0.17602556943893433, -0.8417962193489075, -0.33050695061683655, 0.08187534660100937, 0.35799700021743774, -0.22269625961780548, 0.026066116988658905, 0.03357049077749252, 0.34546494483947754, -0.09888196736574173, 0.3313341736793518, 0.35131382942199707, 0.4556196331977844, -0.19695612788200378, 0.1761651486158371, -0.6264569759368896, -0.07231508940458298, 0.10336003452539444, 0.07752178609371185, 0.5530408620834351, 0.01386663131415844, 0.7373855113983154, -0.36467820405960083, -1.450913906097412, -0.08285603672266006, -0.25894221663475037, -0.3406654894351959, 0.11599279940128326, -0.08816834539175034, 0.05016550049185753, 0.10680476576089859, 0.7345742583274841, 0.5545358657836914, 0.03725026920437813, -0.013942588120698929, 0.15905942022800446, 0.10697169601917267, -0.1159278005361557, -0.14520259201526642, -0.11516434699296951, 0.009737834334373474, -0.21393126249313354, 0.2254694700241089, -0.26198986172676086, 0.38086384534835815, -0.45145368576049805, -0.03374387323856354, 0.7591500878334045, -0.044888246804475784, 1.1086310148239136, 0.6878536343574524, 0.11878693848848343, -0.23152923583984375, -0.2290886491537094, 0.5528044104576111, -0.1958092898130417, -0.7428138852119446, 0.4733226001262665, 0.6412751078605652, 0.5031447410583496, 
0.016923299059271812, -0.18432317674160004, 0.35899147391319275, -0.11351587623357773, 0.2660171687602997, 0.15020841360092163, -0.22641459107398987, -0.025799907743930817, -0.06872990727424622, 0.547649621963501, 0.025877539068460464, -0.15108099579811096, 0.1667388379573822, 0.022908709943294525, -0.19313660264015198, 0.24992817640304565, 0.344704270362854, 0.5190809369087219, -0.11737774312496185, -0.2888471186161041, -0.07916659861803055, 0.2710835635662079, -0.006746355444192886, 0.36876654624938965, -0.18921276926994324, 0.09448281675577164, -0.18097177147865295, 0.1651483029127121, 0.06502965092658997, -0.06709907203912735, -0.1526907980442047, -0.007644282653927803, 0.16943910717964172, -0.21555569767951965, 0.7630714774131775, 0.016727717593312263, -0.5200330018997192, -0.0036530615761876106, 0.09650308638811111, 0.2635899782180786, -0.3506390154361725, -0.3254963159561157, -0.34183427691459656, 0.31788069009780884, -0.12417469918727875, 0.455859899520874, -0.08290059864521027, -0.015282154083251953, 0.1094369888305664, 0.26194971799850464, -0.6269403100013733, 0.43764355778694153, -0.11381351947784424, 0.1532089114189148, -0.05476648733019829, -0.6155551075935364, -0.26819321513175964, 0.4747026562690735, -0.21404141187667847, -1.6814441680908203, -0.33003154397010803, -0.06825298070907593, 0.16378769278526306, -0.186134472489357, 0.17319530248641968, 0.04992658272385597, 0.4345815181732178, -0.2896181643009186, 0.3351740539073944, -0.12574052810668945, 0.3193739354610443, 0.07733222097158432, 0.0655708909034729, 0.07784054428339005, -0.08181453496217728, 0.4951131343841553, -0.2834014296531677, 0.17430414259433746, 0.11342763900756836, 0.13239474594593048, -0.09465833008289337, 1.4634194374084473, -0.014428001828491688, 0.2063509076833725, 0.18044200539588928, -0.03166770190000534, 0.04698008671402931, -0.0871230885386467, 0.29907214641571045, 0.6440877914428711, -0.12226957082748413, -0.035559624433517456, -0.27108025550842285, -0.03263016417622566, 0.2912079989910126, -0.06217412278056145, -0.05635988339781761, 0.48305007815361023, -0.2926708459854126, 0.19818736612796783, 0.2689226567745209, -0.3818262219429016, 0.09530964493751526, 0.4883849024772644, -0.46904870867729187, -0.40877559781074524, -0.2623692452907562, 0.343670129776001, 0.08838459104299545, -0.04895532876253128, 0.12374666333198547, -0.21999061107635498, -0.15163420140743256, -0.00264864438213408, 0.016250310465693474, -0.4864921569824219, -0.337839812040329, 0.03290683031082153, -0.4574444591999054, -0.41137075424194336, -0.35722532868385315, -0.7601121068000793, -0.45991116762161255, 0.3345332145690918 ] } } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **text** is of type `FieldTypes.text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **label** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. 
Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.
    * (optional) **label-suggestion** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].
* **✨ NEW** **Vectors**: As of Argilla 1.19.0, vectors have been included in order to support similarity search, so that similar records can be explored through vector search powered by the configured search engine (a minimal usage sketch is provided at the end of this card). The vectors are optional and are not visible in the UI; they are uploaded and used internally. Each vector must match the dimensions previously defined in its settings.
    * (optional) **sentence_embedding** is of type `float32` and has a dimension of (1, `384`).

Additionally, we also have two more fields that are optional and are the following:

* **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`.

* **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.

### Data Splits

The dataset contains a single split, which is `train`.

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation guidelines

Classify the articles into one of the four categories.

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
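As referenced in the vectors description above, the following is a minimal, hypothetical sketch of how the `sentence_embedding` vector could be attached to records and used for similarity search. It assumes Argilla >= 1.19, which introduced `VectorSettings` and vector search; the zero-valued embedding and the commented `find_similar_records` call are illustrative placeholders, so check your Argilla version's API before relying on them:

```python
import argilla as rg

# Illustrative setup mirroring this card's vector settings: a single
# vector named "sentence_embedding" with 384 dimensions.
dataset = rg.FeedbackDataset(
    fields=[rg.TextField(name="text")],
    questions=[
        rg.LabelQuestion(
            name="label",
            labels=["World", "Sports", "Business", "Sci/Tech"],
        )
    ],
    vectors_settings=[
        rg.VectorSettings(name="sentence_embedding", dimensions=384)
    ],
)

# Vectors are plain 1-dimensional float lists whose length must match the
# declared dimensions (384 here); zeros stand in for a real embedding.
record = rg.FeedbackRecord(
    fields={"text": "Example news snippet."},
    vectors={"sentence_embedding": [0.0] * 384},
)
dataset.add_records([record])

# Once pushed to an Argilla server, similar records can be retrieved via
# vector search (API assumed from Argilla >= 1.19; signature may differ):
# remote = dataset.push_to_argilla(name="vectors_demo", workspace="admin")
# hits = remote.find_similar_records(
#     vector_name="sentence_embedding",
#     value=[0.0] * 384,
#     max_results=5,
# )
```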
argilla/end2end_textclassification_with_vectors
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-29T14:38:40+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2024-02-13T00:58:07+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for end2end\_textclassification\_with\_vectors
===========================================================


This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'.


Dataset Description
-------------------


* Homepage: URL
* Repository: URL
* Paper:
* Leaderboard:
* Point of Contact:


### Dataset Summary


This dataset contains:


* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla.
* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'.
* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.


### Load with Argilla


To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:


### Load with 'datasets'


To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:


### Supported Tasks and Leaderboards


This dataset can contain multiple fields, questions and responses, so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.


There are no leaderboards associated with this dataset.


### Languages


Dataset Structure
-----------------


### Data in Argilla


The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.


The fields are the dataset records themselves; for the moment, just text fields are supported. These are the ones that will be used to provide responses to the questions.



The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking.



The suggestions are human- or machine-generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named by appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. Thus, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata".


The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'.
NEW The vectors are additional columns that contain a floating-point vector, constrained to the dimensions pre-defined in the vectors\_settings when configuring the vectors within the dataset itself; the vectors will always be 1-dimensional. The vectors are optional and identified by the pre-defined vector name in the dataset configuration file in 'URL'.


Vector Name: sentence\_embedding, Title: Sentence Embedding, Dimensions: [1, 384]



The guidelines are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.


### Data Instances


An example of a dataset instance in Argilla looks as follows:


While the same record in HuggingFace 'datasets' looks as follows:


### Data Fields


Among the dataset fields, we differentiate between the following:


* Fields: These are the dataset records themselves; for the moment, just text fields are supported. These are the ones that will be used to provide responses to the questions.


	+ text is of type 'URL'.
* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.


	+ label is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].
* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.


	+ (optional) label-suggestion is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].
* NEW Vectors: As of Argilla 1.19.0, vectors have been included in order to support similarity search, so that similar records can be explored through vector search powered by the configured search engine. The vectors are optional and are not visible in the UI; they are uploaded and used internally. Each vector must match the dimensions previously defined in its settings.


	+ (optional) sentence\_embedding is of type 'float32' and has a dimension of (1, '384').


Additionally, we also have two more fields that are optional and are the following:


* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'.
* external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.


### Data Splits


The dataset contains a single split, which is 'train'.


Dataset Creation
----------------


### Curation Rationale


### Source Data


#### Initial Data Collection and Normalization


#### Who are the source language producers?
### Annotations #### Annotation guidelines Classify the articles into one of the four categories. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nNEW The vectors are different columns that contain a vector in floating point, which is constraint to the pre-defined dimensions in the vectors\\_settings when configuring the vectors within the dataset itself, also the dimensions will always be 1-dimensional. 
The vectors are optional and identified by the pre-defined vector name in the dataset configuration file in 'URL'.\n\n\nVector Name: sentence\\_embedding, Title: Sentence Embedding, Dimensions: [1, 384]\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* NEW Vectors: As of Argilla 1.19.0, the vectors have been included in order to add support for similarity search to explore similar records based on vector search powered by the search engine defined. The vectors are optional and cannot be seen within the UI, those are uploaded and internally used. Also the vectors will always be optional, and only the dimensions previously defined in their settings.\n\n\n\t+ (optional) sentence\\_embedding is of type 'float32' and has a dimension of (1, '384').\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. 
This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nNEW The vectors are different columns that contain a vector in floating point, which is constraint to the pre-defined dimensions in the vectors\\_settings when configuring the vectors within the dataset itself, also the dimensions will always be 1-dimensional. 
The vectors are optional and identified by the pre-defined vector name in the dataset configuration file in 'URL'.\n\n\nVector Name: sentence\\_embedding, Title: Sentence Embedding, Dimensions: [1, 384]\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* NEW Vectors: As of Argilla 1.19.0, the vectors have been included in order to add support for similarity search to explore similar records based on vector search powered by the search engine defined. The vectors are optional and cannot be seen within the UI, those are uploaded and internally used. Also the vectors will always be optional, and only the dimensions previously defined in their settings.\n\n\n\t+ (optional) sentence\\_embedding is of type 'float32' and has a dimension of (1, '384').\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. 
This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 521, 40, 630, 27, 7, 4, 10, 10, 5, 17, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nNEW The vectors are different columns that contain a vector in floating point, which is constraint to the pre-defined dimensions in the vectors\\_settings when configuring the vectors within the dataset itself, also the dimensions will always be 1-dimensional. 
The vectors are optional and identified by the pre-defined vector name in the dataset configuration file in 'URL'.\n\n\nVector Name: sentence\\_embedding, Title: Sentence Embedding, Dimensions: [1, 384]\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
103e0fab2505aa619d90577d6680197c2ec8bb36
# Dataset Card for "paragraphs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
raminass/paragraphs
[ "region:us" ]
2023-11-29T14:54:43+00:00
{"dataset_info": {"features": [{"name": "author_name", "dtype": "string"}, {"name": "year", "dtype": "int64"}, {"name": "label", "dtype": "int64"}, {"name": "category", "dtype": "string"}, {"name": "case_name", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 166455211, "num_examples": 59964}, {"name": "test", "num_bytes": 40760575, "num_examples": 14694}], "download_size": 104303144, "dataset_size": 207215786}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-12-10T12:10:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "paragraphs" More Information needed
[ "# Dataset Card for \"paragraphs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"paragraphs\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"paragraphs\"\n\nMore Information needed" ]
e578ae6a2eab01219b28dbf109632e1849d64455
# Dataset Card for "plantbert-vanilla-cased" The original data comes from [Plant Science Corpus](https://zenodo.org/records/10022686).
shius/plantbert-vanilla-cased
[ "region:us" ]
2023-11-29T14:55:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 573766969.1417043, "num_examples": 379176}, {"name": "test", "num_bytes": 63752389.85829573, "num_examples": 42131}], "download_size": 358791102, "dataset_size": 637519359.0}}
2023-11-29T15:05:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "plantbert-vanilla-cased" The original data comes from Plant Science Corpus.
[ "# Dataset Card for \"plantbert-vanilla-cased\"\n\nThe original data comes from Plant Science Corpus." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"plantbert-vanilla-cased\"\n\nThe original data comes from Plant Science Corpus." ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"plantbert-vanilla-cased\"\n\nThe original data comes from Plant Science Corpus." ]
c0a7df5a4c2d3585f0ad7c9595fcabd08507832b
# Dataset Card for "ade20k_dino" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
danjacobellis/ade20k_dino
[ "region:us" ]
2023-11-29T15:02:33+00:00
{"dataset_info": {"features": [{"name": "annotation", "dtype": "image"}, {"name": "scene_category", "dtype": {"class_label": {"names": {"0": "airport_terminal", "1": "art_gallery", "2": "badlands", "3": "ball_pit", "4": "bathroom", "5": "beach", "6": "bedroom", "7": "booth_indoor", "8": "botanical_garden", "9": "bridge", "10": "bullring", "11": "bus_interior", "12": "butte", "13": "canyon", "14": "casino_outdoor", "15": "castle", "16": "church_outdoor", "17": "closet", "18": "coast", "19": "conference_room", "20": "construction_site", "21": "corral", "22": "corridor", "23": "crosswalk", "24": "day_care_center", "25": "sand", "26": "elevator_interior", "27": "escalator_indoor", "28": "forest_road", "29": "gangplank", "30": "gas_station", "31": "golf_course", "32": "gymnasium_indoor", "33": "harbor", "34": "hayfield", "35": "heath", "36": "hoodoo", "37": "house", "38": "hunting_lodge_outdoor", "39": "ice_shelf", "40": "joss_house", "41": "kiosk_indoor", "42": "kitchen", "43": "landfill", "44": "library_indoor", "45": "lido_deck_outdoor", "46": "living_room", "47": "locker_room", "48": "market_outdoor", "49": "mountain_snowy", "50": "office", "51": "orchard", "52": "arbor", "53": "bookshelf", "54": "mews", "55": "nook", "56": "preserve", "57": "traffic_island", "58": "palace", "59": "palace_hall", "60": "pantry", "61": "patio", "62": "phone_booth", "63": "establishment", "64": "poolroom_home", "65": "quonset_hut_outdoor", "66": "rice_paddy", "67": "sandbox", "68": "shopfront", "69": "skyscraper", "70": "stone_circle", "71": "subway_interior", "72": "platform", "73": "supermarket", "74": "swimming_pool_outdoor", "75": "television_studio", "76": "indoor_procenium", "77": "train_railway", "78": "coral_reef", "79": "viaduct", "80": "wave", "81": "wind_farm", "82": "bottle_storage", "83": "abbey", "84": "access_road", "85": "air_base", "86": "airfield", "87": "airlock", "88": "airplane_cabin", "89": "airport", "90": "entrance", "91": "airport_ticket_counter", "92": "alcove", "93": "alley", "94": "amphitheater", "95": "amusement_arcade", "96": "amusement_park", "97": "anechoic_chamber", "98": "apartment_building_outdoor", "99": "apse_indoor", "100": "apse_outdoor", "101": "aquarium", "102": "aquatic_theater", "103": "aqueduct", "104": "arcade", "105": "arch", "106": "archaelogical_excavation", "107": "archive", "108": "basketball", "109": "football", "110": "hockey", "111": "performance", "112": "rodeo", "113": "soccer", "114": "armory", "115": "army_base", "116": "arrival_gate_indoor", "117": "arrival_gate_outdoor", "118": "art_school", "119": "art_studio", "120": "artists_loft", "121": "assembly_line", "122": "athletic_field_indoor", "123": "athletic_field_outdoor", "124": "atrium_home", "125": "atrium_public", "126": "attic", "127": "auditorium", "128": "auto_factory", "129": "auto_mechanics_indoor", "130": "auto_mechanics_outdoor", "131": "auto_racing_paddock", "132": "auto_showroom", "133": "backstage", "134": "backstairs", "135": "badminton_court_indoor", "136": "badminton_court_outdoor", "137": "baggage_claim", "138": "shop", "139": "exterior", "140": "balcony_interior", "141": "ballroom", "142": "bamboo_forest", "143": "bank_indoor", "144": "bank_outdoor", "145": "bank_vault", "146": "banquet_hall", "147": "baptistry_indoor", "148": "baptistry_outdoor", "149": "bar", "150": "barbershop", "151": "barn", "152": "barndoor", "153": "barnyard", "154": "barrack", "155": "baseball_field", "156": "basement", "157": "basilica", "158": "basketball_court_indoor", "159": "basketball_court_outdoor", 
"160": "bathhouse", "161": "batters_box", "162": "batting_cage_indoor", "163": "batting_cage_outdoor", "164": "battlement", "165": "bayou", "166": "bazaar_indoor", "167": "bazaar_outdoor", "168": "beach_house", "169": "beauty_salon", "170": "bedchamber", "171": "beer_garden", "172": "beer_hall", "173": "belfry", "174": "bell_foundry", "175": "berth", "176": "berth_deck", "177": "betting_shop", "178": "bicycle_racks", "179": "bindery", "180": "biology_laboratory", "181": "bistro_indoor", "182": "bistro_outdoor", "183": "bleachers_indoor", "184": "bleachers_outdoor", "185": "boardwalk", "186": "boat_deck", "187": "boathouse", "188": "bog", "189": "bomb_shelter_indoor", "190": "bookbindery", "191": "bookstore", "192": "bow_window_indoor", "193": "bow_window_outdoor", "194": "bowling_alley", "195": "box_seat", "196": "boxing_ring", "197": "breakroom", "198": "brewery_indoor", "199": "brewery_outdoor", "200": "brickyard_indoor", "201": "brickyard_outdoor", "202": "building_complex", "203": "building_facade", "204": "bullpen", "205": "burial_chamber", "206": "bus_depot_indoor", "207": "bus_depot_outdoor", "208": "bus_shelter", "209": "bus_station_indoor", "210": "bus_station_outdoor", "211": "butchers_shop", "212": "cabana", "213": "cabin_indoor", "214": "cabin_outdoor", "215": "cafeteria", "216": "call_center", "217": "campsite", "218": "campus", "219": "natural", "220": "urban", "221": "candy_store", "222": "canteen", "223": "car_dealership", "224": "backseat", "225": "frontseat", "226": "caravansary", "227": "cardroom", "228": "cargo_container_interior", "229": "airplane", "230": "boat", "231": "freestanding", "232": "carport_indoor", "233": "carport_outdoor", "234": "carrousel", "235": "casino_indoor", "236": "catacomb", "237": "cathedral_indoor", "238": "cathedral_outdoor", "239": "catwalk", "240": "cavern_indoor", "241": "cavern_outdoor", "242": "cemetery", "243": "chalet", "244": "chaparral", "245": "chapel", "246": "checkout_counter", "247": "cheese_factory", "248": "chemical_plant", "249": "chemistry_lab", "250": "chicken_coop_indoor", "251": "chicken_coop_outdoor", "252": "chicken_farm_indoor", "253": "chicken_farm_outdoor", "254": "childs_room", "255": "choir_loft_interior", "256": "church_indoor", "257": "circus_tent_indoor", "258": "circus_tent_outdoor", "259": "city", "260": "classroom", "261": "clean_room", "262": "cliff", "263": "booth", "264": "room", "265": "clock_tower_indoor", "266": "cloister_indoor", "267": "cloister_outdoor", "268": "clothing_store", "269": "coast_road", "270": "cockpit", "271": "coffee_shop", "272": "computer_room", "273": "conference_center", "274": "conference_hall", "275": "confessional", "276": "control_room", "277": "control_tower_indoor", "278": "control_tower_outdoor", "279": "convenience_store_indoor", "280": "convenience_store_outdoor", "281": "corn_field", "282": "cottage", "283": "cottage_garden", "284": "courthouse", "285": "courtroom", "286": "courtyard", "287": "covered_bridge_interior", "288": "crawl_space", "289": "creek", "290": "crevasse", "291": "library", "292": "cybercafe", "293": "dacha", "294": "dairy_indoor", "295": "dairy_outdoor", "296": "dam", "297": "dance_school", "298": "darkroom", "299": "delicatessen", "300": "dentists_office", "301": "department_store", "302": "departure_lounge", "303": "vegetation", "304": "desert_road", "305": "diner_indoor", "306": "diner_outdoor", "307": "dinette_home", "308": "vehicle", "309": "dining_car", "310": "dining_hall", "311": "dining_room", "312": "dirt_track", "313": "discotheque", "314": 
"distillery", "315": "ditch", "316": "dock", "317": "dolmen", "318": "donjon", "319": "doorway_indoor", "320": "doorway_outdoor", "321": "dorm_room", "322": "downtown", "323": "drainage_ditch", "324": "dress_shop", "325": "dressing_room", "326": "drill_rig", "327": "driveway", "328": "driving_range_indoor", "329": "driving_range_outdoor", "330": "drugstore", "331": "dry_dock", "332": "dugout", "333": "earth_fissure", "334": "editing_room", "335": "electrical_substation", "336": "elevated_catwalk", "337": "door", "338": "freight_elevator", "339": "elevator_lobby", "340": "elevator_shaft", "341": "embankment", "342": "embassy", "343": "engine_room", "344": "entrance_hall", "345": "escalator_outdoor", "346": "escarpment", "347": "estuary", "348": "excavation", "349": "exhibition_hall", "350": "fabric_store", "351": "factory_indoor", "352": "factory_outdoor", "353": "fairway", "354": "farm", "355": "fastfood_restaurant", "356": "fence", "357": "cargo_deck", "358": "ferryboat_indoor", "359": "passenger_deck", "360": "cultivated", "361": "wild", "362": "field_road", "363": "fire_escape", "364": "fire_station", "365": "firing_range_indoor", "366": "firing_range_outdoor", "367": "fish_farm", "368": "fishmarket", "369": "fishpond", "370": "fitting_room_interior", "371": "fjord", "372": "flea_market_indoor", "373": "flea_market_outdoor", "374": "floating_dry_dock", "375": "flood", "376": "florist_shop_indoor", "377": "florist_shop_outdoor", "378": "fly_bridge", "379": "food_court", "380": "football_field", "381": "broadleaf", "382": "needleleaf", "383": "forest_fire", "384": "forest_path", "385": "formal_garden", "386": "fort", "387": "fortress", "388": "foundry_indoor", "389": "foundry_outdoor", "390": "fountain", "391": "freeway", "392": "funeral_chapel", "393": "funeral_home", "394": "furnace_room", "395": "galley", "396": "game_room", "397": "garage_indoor", "398": "garage_outdoor", "399": "garbage_dump", "400": "gasworks", "401": "gate", "402": "gatehouse", "403": "gazebo_interior", "404": "general_store_indoor", "405": "general_store_outdoor", "406": "geodesic_dome_indoor", "407": "geodesic_dome_outdoor", "408": "ghost_town", "409": "gift_shop", "410": "glacier", "411": "glade", "412": "gorge", "413": "granary", "414": "great_hall", "415": "greengrocery", "416": "greenhouse_indoor", "417": "greenhouse_outdoor", "418": "grotto", "419": "guardhouse", "420": "gulch", "421": "gun_deck_indoor", "422": "gun_deck_outdoor", "423": "gun_store", "424": "hacienda", "425": "hallway", "426": "handball_court", "427": "hangar_indoor", "428": "hangar_outdoor", "429": "hardware_store", "430": "hat_shop", "431": "hatchery", "432": "hayloft", "433": "hearth", "434": "hedge_maze", "435": "hedgerow", "436": "heliport", "437": "herb_garden", "438": "highway", "439": "hill", "440": "home_office", "441": "home_theater", "442": "hospital", "443": "hospital_room", "444": "hot_spring", "445": "hot_tub_indoor", "446": "hot_tub_outdoor", "447": "hotel_outdoor", "448": "hotel_breakfast_area", "449": "hotel_room", "450": "hunting_lodge_indoor", "451": "hut", "452": "ice_cream_parlor", "453": "ice_floe", "454": "ice_skating_rink_indoor", "455": "ice_skating_rink_outdoor", "456": "iceberg", "457": "igloo", "458": "imaret", "459": "incinerator_indoor", "460": "incinerator_outdoor", "461": "industrial_area", "462": "industrial_park", "463": "inn_indoor", "464": "inn_outdoor", "465": "irrigation_ditch", "466": "islet", "467": "jacuzzi_indoor", "468": "jacuzzi_outdoor", "469": "jail_indoor", "470": "jail_outdoor", "471": 
"jail_cell", "472": "japanese_garden", "473": "jetty", "474": "jewelry_shop", "475": "junk_pile", "476": "junkyard", "477": "jury_box", "478": "kasbah", "479": "kennel_indoor", "480": "kennel_outdoor", "481": "kindergarden_classroom", "482": "kiosk_outdoor", "483": "kitchenette", "484": "lab_classroom", "485": "labyrinth_indoor", "486": "labyrinth_outdoor", "487": "lagoon", "488": "artificial", "489": "landing", "490": "landing_deck", "491": "laundromat", "492": "lava_flow", "493": "lavatory", "494": "lawn", "495": "lean-to", "496": "lecture_room", "497": "legislative_chamber", "498": "levee", "499": "library_outdoor", "500": "lido_deck_indoor", "501": "lift_bridge", "502": "lighthouse", "503": "limousine_interior", "504": "liquor_store_indoor", "505": "liquor_store_outdoor", "506": "loading_dock", "507": "lobby", "508": "lock_chamber", "509": "loft", "510": "lookout_station_indoor", "511": "lookout_station_outdoor", "512": "lumberyard_indoor", "513": "lumberyard_outdoor", "514": "machine_shop", "515": "manhole", "516": "mansion", "517": "manufactured_home", "518": "market_indoor", "519": "marsh", "520": "martial_arts_gym", "521": "mastaba", "522": "maternity_ward", "523": "mausoleum", "524": "medina", "525": "menhir", "526": "mesa", "527": "mess_hall", "528": "mezzanine", "529": "military_hospital", "530": "military_hut", "531": "military_tent", "532": "mine", "533": "mineshaft", "534": "mini_golf_course_indoor", "535": "mini_golf_course_outdoor", "536": "mission", "537": "dry", "538": "water", "539": "mobile_home", "540": "monastery_indoor", "541": "monastery_outdoor", "542": "moon_bounce", "543": "moor", "544": "morgue", "545": "mosque_indoor", "546": "mosque_outdoor", "547": "motel", "548": "mountain", "549": "mountain_path", "550": "mountain_road", "551": "movie_theater_indoor", "552": "movie_theater_outdoor", "553": "mudflat", "554": "museum_indoor", "555": "museum_outdoor", "556": "music_store", "557": "music_studio", "558": "misc", "559": "natural_history_museum", "560": "naval_base", "561": "newsroom", "562": "newsstand_indoor", "563": "newsstand_outdoor", "564": "nightclub", "565": "nuclear_power_plant_indoor", "566": "nuclear_power_plant_outdoor", "567": "nunnery", "568": "nursery", "569": "nursing_home", "570": "oasis", "571": "oast_house", "572": "observatory_indoor", "573": "observatory_outdoor", "574": "observatory_post", "575": "ocean", "576": "office_building", "577": "office_cubicles", "578": "oil_refinery_indoor", "579": "oil_refinery_outdoor", "580": "oilrig", "581": "operating_room", "582": "optician", "583": "organ_loft_interior", "584": "orlop_deck", "585": "ossuary", "586": "outcropping", "587": "outhouse_indoor", "588": "outhouse_outdoor", "589": "overpass", "590": "oyster_bar", "591": "oyster_farm", "592": "acropolis", "593": "aircraft_carrier_object", "594": "amphitheater_indoor", "595": "archipelago", "596": "questionable", "597": "assembly_hall", "598": "assembly_plant", "599": "awning_deck", "600": "back_porch", "601": "backdrop", "602": "backroom", "603": "backstage_outdoor", "604": "backstairs_indoor", "605": "backwoods", "606": "ballet", "607": "balustrade", "608": "barbeque", "609": "basin_outdoor", "610": "bath_indoor", "611": "bath_outdoor", "612": "bathhouse_outdoor", "613": "battlefield", "614": "bay", "615": "booth_outdoor", "616": "bottomland", "617": "breakfast_table", "618": "bric-a-brac", "619": "brooklet", "620": "bubble_chamber", "621": "buffet", "622": "bulkhead", "623": "bunk_bed", "624": "bypass", "625": "byroad", "626": "cabin_cruiser", 
"627": "cargo_helicopter", "628": "cellar", "629": "chair_lift", "630": "cocktail_lounge", "631": "corner", "632": "country_house", "633": "country_road", "634": "customhouse", "635": "dance_floor", "636": "deck-house_boat_deck_house", "637": "deck-house_deck_house", "638": "dining_area", "639": "diving_board", "640": "embrasure", "641": "entranceway_indoor", "642": "entranceway_outdoor", "643": "entryway_outdoor", "644": "estaminet", "645": "farm_building", "646": "farmhouse", "647": "feed_bunk", "648": "field_house", "649": "field_tent_indoor", "650": "field_tent_outdoor", "651": "fire_trench", "652": "fireplace", "653": "flashflood", "654": "flatlet", "655": "floating_dock", "656": "flood_plain", "657": "flowerbed", "658": "flume_indoor", "659": "flying_buttress", "660": "foothill", "661": "forecourt", "662": "foreshore", "663": "front_porch", "664": "garden", "665": "gas_well", "666": "glen", "667": "grape_arbor", "668": "grove", "669": "guardroom", "670": "guesthouse", "671": "gymnasium_outdoor", "672": "head_shop", "673": "hen_yard", "674": "hillock", "675": "housing_estate", "676": "housing_project", "677": "howdah", "678": "inlet", "679": "insane_asylum", "680": "outside", "681": "juke_joint", "682": "jungle", "683": "kraal", "684": "laboratorywet", "685": "landing_strip", "686": "layby", "687": "lean-to_tent", "688": "loge", "689": "loggia_outdoor", "690": "lower_deck", "691": "luggage_van", "692": "mansard", "693": "meadow", "694": "meat_house", "695": "megalith", "696": "mens_store_outdoor", "697": "mental_institution_indoor", "698": "mental_institution_outdoor", "699": "military_headquarters", "700": "millpond", "701": "millrace", "702": "natural_spring", "703": "nursing_home_outdoor", "704": "observation_station", "705": "open-hearth_furnace", "706": "operating_table", "707": "outbuilding", "708": "palestra", "709": "parkway", "710": "patio_indoor", "711": "pavement", "712": "pawnshop_outdoor", "713": "pinetum", "714": "piste_road", "715": "pizzeria_outdoor", "716": "powder_room", "717": "pumping_station", "718": "reception_room", "719": "rest_stop", "720": "retaining_wall", "721": "rift_valley", "722": "road", "723": "rock_garden", "724": "rotisserie", "725": "safari_park", "726": "salon", "727": "saloon", "728": "sanatorium", "729": "science_laboratory", "730": "scrubland", "731": "scullery", "732": "seaside", "733": "semidesert", "734": "shelter", "735": "shelter_deck", "736": "shelter_tent", "737": "shore", "738": "shrubbery", "739": "sidewalk", "740": "snack_bar", "741": "snowbank", "742": "stage_set", "743": "stall", "744": "stateroom", "745": "store", "746": "streetcar_track", "747": "student_center", "748": "study_hall", "749": "sugar_refinery", "750": "sunroom", "751": "supply_chamber", "752": "t-bar_lift", "753": "tannery", "754": "teahouse", "755": "threshing_floor", "756": "ticket_window_indoor", "757": "tidal_basin", "758": "tidal_river", "759": "tiltyard", "760": "tollgate", "761": "tomb", "762": "tract_housing", "763": "trellis", "764": "truck_stop", "765": "upper_balcony", "766": "vestibule", "767": "vinery", "768": "walkway", "769": "war_room", "770": "washroom", "771": "water_fountain", "772": "water_gate", "773": "waterscape", "774": "waterway", "775": "wetland", "776": "widows_walk_indoor", "777": "windstorm", "778": "packaging_plant", "779": "pagoda", "780": "paper_mill", "781": "park", "782": "parking_garage_indoor", "783": "parking_garage_outdoor", "784": "parking_lot", "785": "parlor", "786": "particle_accelerator", "787": "party_tent_indoor", "788": 
"party_tent_outdoor", "789": "pasture", "790": "pavilion", "791": "pawnshop", "792": "pedestrian_overpass_indoor", "793": "penalty_box", "794": "pet_shop", "795": "pharmacy", "796": "physics_laboratory", "797": "piano_store", "798": "picnic_area", "799": "pier", "800": "pig_farm", "801": "pilothouse_indoor", "802": "pilothouse_outdoor", "803": "pitchers_mound", "804": "pizzeria", "805": "planetarium_indoor", "806": "planetarium_outdoor", "807": "plantation_house", "808": "playground", "809": "playroom", "810": "plaza", "811": "podium_indoor", "812": "podium_outdoor", "813": "police_station", "814": "pond", "815": "pontoon_bridge", "816": "poop_deck", "817": "porch", "818": "portico", "819": "portrait_studio", "820": "postern", "821": "power_plant_outdoor", "822": "print_shop", "823": "priory", "824": "promenade", "825": "promenade_deck", "826": "pub_indoor", "827": "pub_outdoor", "828": "pulpit", "829": "putting_green", "830": "quadrangle", "831": "quicksand", "832": "quonset_hut_indoor", "833": "racecourse", "834": "raceway", "835": "raft", "836": "railroad_track", "837": "railway_yard", "838": "rainforest", "839": "ramp", "840": "ranch", "841": "ranch_house", "842": "reading_room", "843": "reception", "844": "recreation_room", "845": "rectory", "846": "recycling_plant_indoor", "847": "refectory", "848": "repair_shop", "849": "residential_neighborhood", "850": "resort", "851": "rest_area", "852": "restaurant", "853": "restaurant_kitchen", "854": "restaurant_patio", "855": "restroom_indoor", "856": "restroom_outdoor", "857": "revolving_door", "858": "riding_arena", "859": "river", "860": "road_cut", "861": "rock_arch", "862": "roller_skating_rink_indoor", "863": "roller_skating_rink_outdoor", "864": "rolling_mill", "865": "roof", "866": "roof_garden", "867": "root_cellar", "868": "rope_bridge", "869": "roundabout", "870": "roundhouse", "871": "rubble", "872": "ruin", "873": "runway", "874": "sacristy", "875": "salt_plain", "876": "sand_trap", "877": "sandbar", "878": "sauna", "879": "savanna", "880": "sawmill", "881": "schoolhouse", "882": "schoolyard", "883": "science_museum", "884": "scriptorium", "885": "sea_cliff", "886": "seawall", "887": "security_check_point", "888": "server_room", "889": "sewer", "890": "sewing_room", "891": "shed", "892": "shipping_room", "893": "shipyard_outdoor", "894": "shoe_shop", "895": "shopping_mall_indoor", "896": "shopping_mall_outdoor", "897": "shower", "898": "shower_room", "899": "shrine", "900": "signal_box", "901": "sinkhole", "902": "ski_jump", "903": "ski_lodge", "904": "ski_resort", "905": "ski_slope", "906": "sky", "907": "skywalk_indoor", "908": "skywalk_outdoor", "909": "slum", "910": "snowfield", "911": "massage_room", "912": "mineral_bath", "913": "spillway", "914": "sporting_goods_store", "915": "squash_court", "916": "stable", "917": "baseball", "918": "stadium_outdoor", "919": "stage_indoor", "920": "stage_outdoor", "921": "staircase", "922": "starting_gate", "923": "steam_plant_outdoor", "924": "steel_mill_indoor", "925": "storage_room", "926": "storm_cellar", "927": "street", "928": "strip_mall", "929": "strip_mine", "930": "student_residence", "931": "submarine_interior", "932": "sun_deck", "933": "sushi_bar", "934": "swamp", "935": "swimming_hole", "936": "swimming_pool_indoor", "937": "synagogue_indoor", "938": "synagogue_outdoor", "939": "taxistand", "940": "taxiway", "941": "tea_garden", "942": "tearoom", "943": "teashop", "944": "television_room", "945": "east_asia", "946": "mesoamerican", "947": "south_asia", "948": "western", 
"949": "tennis_court_indoor", "950": "tennis_court_outdoor", "951": "tent_outdoor", "952": "terrace_farm", "953": "indoor_round", "954": "indoor_seats", "955": "theater_outdoor", "956": "thriftshop", "957": "throne_room", "958": "ticket_booth", "959": "tobacco_shop_indoor", "960": "toll_plaza", "961": "tollbooth", "962": "topiary_garden", "963": "tower", "964": "town_house", "965": "toyshop", "966": "track_outdoor", "967": "trading_floor", "968": "trailer_park", "969": "train_interior", "970": "train_station_outdoor", "971": "station", "972": "tree_farm", "973": "tree_house", "974": "trench", "975": "trestle_bridge", "976": "tundra", "977": "rail_indoor", "978": "rail_outdoor", "979": "road_indoor", "980": "road_outdoor", "981": "turkish_bath", "982": "ocean_deep", "983": "ocean_shallow", "984": "utility_room", "985": "valley", "986": "van_interior", "987": "vegetable_garden", "988": "velodrome_indoor", "989": "velodrome_outdoor", "990": "ventilation_shaft", "991": "veranda", "992": "vestry", "993": "veterinarians_office", "994": "videostore", "995": "village", "996": "vineyard", "997": "volcano", "998": "volleyball_court_indoor", "999": "volleyball_court_outdoor", "1000": "voting_booth", "1001": "waiting_room", "1002": "walk_in_freezer", "1003": "warehouse_indoor", "1004": "warehouse_outdoor", "1005": "washhouse_indoor", "1006": "washhouse_outdoor", "1007": "watchtower", "1008": "water_mill", "1009": "water_park", "1010": "water_tower", "1011": "water_treatment_plant_indoor", "1012": "water_treatment_plant_outdoor", "1013": "block", "1014": "cascade", "1015": "cataract", "1016": "fan", "1017": "plunge", "1018": "watering_hole", "1019": "weighbridge", "1020": "wet_bar", "1021": "wharf", "1022": "wheat_field", "1023": "whispering_gallery", "1024": "widows_walk_interior", "1025": "windmill", "1026": "window_seat", "1027": "barrel_storage", "1028": "winery", "1029": "witness_stand", "1030": "woodland", "1031": "workroom", "1032": "workshop", "1033": "wrestling_ring_indoor", "1034": "wrestling_ring_outdoor", "1035": "yard", "1036": "youth_hostel", "1037": "zen_garden", "1038": "ziggurat", "1039": "zoo", "1040": "forklift", "1041": "hollow", "1042": "hutment", "1043": "pueblo", "1044": "vat", "1045": "perfume_shop", "1046": "steel_mill_outdoor", "1047": "orchestra_pit", "1048": "bridle_path", "1049": "lyceum", "1050": "one-way_street", "1051": "parade_ground", "1052": "pump_room", "1053": "recycling_plant_outdoor", "1054": "chuck_wagon"}}}}, {"name": "cls_token", "sequence": {"sequence": "float32"}}, {"name": "patch_tokens", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "validation", "num_bytes": 3169041014.0, "num_examples": 2000}, {"name": "train", "num_bytes": 32018229884.65, "num_examples": 20210}], "download_size": 35425375857, "dataset_size": 35187270898.65}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}, {"split": "train", "path": "data/train-*"}]}]}
2023-11-29T22:36:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ade20k_dino" More Information needed
[ "# Dataset Card for \"ade20k_dino\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ade20k_dino\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ade20k_dino\"\n\nMore Information needed" ]
d8612939450a4b70ca8b9fbdc8f8bc24c13a91b6
## Dataset Description

- **Homepage** [Projecte AINA](https://projecteaina.cat/tech/)
- **Repository** [HuggingFace](https://huggingface.co/projecte-aina)
- **Paper** ["A CURATEd CATalog: Rethinking the Extraction of Pretraining Corpora for Mid-Resourced Languages"]()
- **Leaderboard** N/A
- **Point of Contact** [email protected]

### Dataset Summary

CATalog is a diverse, open-source Catalan corpus for language modelling. It consists of text documents from 26 different sources, including web crawling, news, forums, digital libraries and public institutions, totaling 17.45 billion words.

### Supported Tasks and Leaderboards

- `Fill-Mask`
- `Text Generation`
- `other:Language-Modelling`: The dataset is suitable for training a model in Language Modelling, i.e. predicting the next word in a given context. Success is measured by achieving a low [Perplexity](https://huggingface.co/spaces/evaluate-metric/perplexity) score, indicating the model's proficiency in accurately predicting subsequent words.
- `other:Masked-Language-Modelling`: The dataset is designed for training models in Masked Language Modelling, which involves predicting masked or hidden words within a sentence. Success is typically measured by a high score, such as accuracy or [F1](https://huggingface.co/spaces/evaluate-metric/f1), on correctly predicting the masked tokens.

### Languages

This dataset is in Catalan (ca-ES). Since it comes from the web, some documents may contain other languages.

## Dataset Structure

### Data Instances

The dataset is provided in JSONL format, where each row corresponds to a single document and contains a document identifier, the text, a quality score, the strategy used to evaluate the document quality, the languages, and the URL of the document, if available.

```
{
  "id": "macocu_ca_20230731_9_402472",
  "text": "Jaume Casañas relleva Dolors Carreras a l’Alcaldia de l’Ajuntament de Cunit. La substitució prevista al pacte de govern del 2019 s’ha materialitzat aquest dissabte al matí. Aquest dissabte al matí, en un acte al Casal Municipal de Cunit, s’ha celebrat l’acte de relleu de l’Alcaldia de l’Ajuntament de Cunit, segons preveia el pacte de govern signat el juny del 2019 pels grups del PSC, encapçalat per la fins ara alcaldessa, Dolors Carreras, i Impulsem Cunit, amb el ja nou alcalde, Jaume Casañas, al capdavant.",
  "score": 0.8105327621841463,
  "strategy": "curate",
  "languages": "{\"ca\": 1.0}",
  "url": ""
}
```

### Data Fields

- `id`: text string containing the document identifier. It consists of the subdataset code, the part number and a document number.
- `text`: text string from the document, with paragraphs separated by two newline escape sequences. It is meant to be used directly as input for language modelling.
- `score`: positive float number representing the document quality, ranging from 0, the worst quality, to 1, the best quality.
- `strategy`: text string describing the type of evaluation applied to obtain the document score. "curate" uses the heuristic evaluation from [CURATE](https://github.com/langtech-bsc/corpus-cleaner-v2), and "perfect" means that a manual review was done and the highest score (1) was applied.
- `languages`: dictionary containing the document languages, with a percentage indicating the character ratio for each one.
- `url`: text string with the URL of the document, if available.

### Data Splits

We do not provide any canonical splits for CATalog.
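Because every document carries a quality `score`, the corpus can be subset at load time instead of relying on canonical splits. Below is a minimal sketch using the Hugging Face `datasets` library in streaming mode; the 0.8 threshold is only an illustrative value, not a recommended cutoff:

```python
from datasets import load_dataset

# Stream the corpus so the ~116 GB of text never has to fit in memory.
catalog = load_dataset("projecte-aina/CATalog", split="train", streaming=True)

# Keep only documents whose heuristic quality score clears the chosen threshold.
high_quality = catalog.filter(lambda doc: doc["score"] >= 0.8)

# Peek at a few surviving documents.
for doc in high_quality.take(3):
    print(doc["id"], round(doc["score"], 3))
```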
## Dataset Creation

### Curation Rationale

CATalog is mainly built on filtered, non-overlapping versions of [CommonCrawl](https://commoncrawl.org/) snapshots and a smaller set of manually selected corpora from specific sources. We use the [CURATE](https://github.com/langtech-bsc/corpus-cleaner-v2) pipeline, which combines exact deduplication, language identification, and scoring heuristics.

In the design of CATalog, we adhere to the following values:

- (1) **Scale & Flexibility**. We intend to produce datasets that have a significant impact on the training of multilingual models in the range of 7B-180B parameters. Since Catalan is a medium-resource language and data acquisition is already a challenge, binary filtering would limit us in terms of the amount of data. By providing a score, we are able to easily filter the corpus according to any requirement.
- (2) **Neutral scoring**. As opposed to ML-based filtering, we use simple rules and heuristics to avoid introducing further bias into the model ([Dodge et al., 2021](https://arxiv.org/abs/2104.08758); [Welbl et al., 2021](https://arxiv.org/abs/2109.07445)). We only use [FastText](https://fasttext.cc/docs/en/language-identification.html) to reject documents in other languages.

During development, we performed comparative judgment experiments to evaluate the usefulness of the scoring from the [CURATE](https://github.com/langtech-bsc/corpus-cleaner-v2) pipeline, which is intended for further filtering and analysis. We found a moderate correlation between the score and the perceived quality of the text. Our main goal was to maximize the usability of the corpus without being forced into a trade-off between quantity and quality.

### Source Data

#### Initial Data Collection and Normalization

We applied extensive data processing using our [CURATE](https://github.com/langtech-bsc/corpus-cleaner-v2) pipeline. We first filter documents by their language content using [FastText](https://fasttext.cc/docs/en/language-identification.html); only documents with at least 50% of characters in Catalan are kept. We then perform exact document deduplication. After this stage, we score each document with a tested set of 8 heuristic evaluators, inspired by other web filtering pipelines as well as our own designs. A sketch of the language-filtering step is shown after the list below.

The following pre-existing datasets were used:

- [`OSCAR-2301`](https://huggingface.co/datasets/oscar-corpus/OSCAR-2301)
- [`OSCAR-2201`](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201)
- [`CaText`](https://zenodo.org/records/5483031)
- [`MaCoCu-ca 1.0`](http://hdl.handle.net/11356/1837)
- [`caWaC`](https://huggingface.co/datasets/cawac)
- [`Colossal OSCAR 1.0`](https://huggingface.co/datasets/oscar-corpus/colossal-oscar-1.0)
- [`mC4`](https://huggingface.co/datasets/mc4)
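The language-identification step described above can be illustrated with the following sketch. It assumes the public `lid.176.bin` fastText model; the exact model and character-ratio computation used by CURATE may differ:

```python
import fasttext

# Public fastText language-identification model (an assumption; CURATE may use
# a different model or configuration).
model = fasttext.load_model("lid.176.bin")

def catalan_char_ratio(text: str) -> float:
    """Approximate the fraction of characters that belong to Catalan lines."""
    lines = [line for line in text.split("\n") if line.strip()]
    total = sum(len(line) for line in lines)
    if total == 0:
        return 0.0
    catalan = 0
    for line in lines:
        labels, _probs = model.predict(line)
        if labels[0] == "__label__ca":
            catalan += len(line)
    return catalan / total

def keep_document(text: str) -> bool:
    # Only documents with at least 50% of characters in Catalan are kept.
    return catalan_char_ratio(text) >= 0.5
```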
#### Who are the source language producers?

Apart from the pre-existing datasets, all of them coming from [CommonCrawl](https://commoncrawl.org/) dumps, the following sources provided their data under Open Data Agreements:

**Media Groups**

- [`IB3`](https://ib3.org/)
- [`Grup El Món`](https://grupmon.cat/)
- [`Vilaweb`](https://www.vilaweb.cat/)
- [`Nació Digital`](https://www.naciodigital.cat/)
- [`ACN`](https://www.acn.cat/)
- [`Racó Català Articles`](https://www.racocatala.cat/)
- [`Racó Català Fòrums (anonymized version)`](https://huggingface.co/datasets/projecte-aina/raco_forums)
- [`Aquí Berguedà`](https://www.aquibergueda.cat/)

**Academic & Book Repositories**

- [`Tesis Doctorals en Xarxa (TDX)`](https://www.tesisenred.net/)
- [`Wikipedia`](https://ca.wikipedia.org/)
- [`Project Gutenberg`](https://www.gutenberg.org/)

**Government Institutions**

- [`Parlament de Catalunya`](https://www.parlament.cat/web/index.html)
- [`Les Corts Valencianes`](https://www.cortsvalencianes.es/)
- [`Diari Oficial de la Generalitat Valenciana`](https://dogv.gva.es/)
- [`Butlletí Oficial de la Universitat d'Alacant`](https://www.boua.ua.es/)

### Annotations

The score is an automatic label obtained from the aggregation of different heuristic evaluators based on predefined thresholds. Specific evaluators penalize documents for factors like minimum word count, average words per sentence, punctuation-per-word rate, unique sentence ratio, stopword ratio, Brunet index, language diversity, and content identified by regular expressions, providing a comprehensive approach to document scoring.

#### Annotation process

The process involves assigning scores between 0 and 1 to sentences, paragraphs, and documents in a hierarchical manner. Individual evaluators at different levels contribute scores that are combined using geometric means, emphasizing a probability-like interpretation that encourages evaluators to assess desirability. The final document score is derived through an analogous aggregation of paragraph scores, distinct from a linear model. A toy sketch of this aggregation is shown below.
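The following sketch illustrates the hierarchical, geometric-mean aggregation. The evaluator outputs here are made-up numbers for a hypothetical three-sentence paragraph; the real evaluators and thresholds live in the [CURATE](https://github.com/langtech-bsc/corpus-cleaner-v2) repository:

```python
import math

def geometric_mean(scores):
    """Combine per-evaluator scores in [0, 1]; a single 0 zeroes the result."""
    return math.prod(scores) ** (1 / len(scores))

# Hypothetical per-sentence evaluator outputs (e.g. length, punctuation,
# stopword checks), one list of scores per sentence.
sentence_scores = [
    geometric_mean([0.9, 1.0, 0.8]),
    geometric_mean([1.0, 0.7, 0.9]),
    geometric_mean([0.95, 1.0, 1.0]),
]

# Paragraph and document levels aggregate the same way.
paragraph_score = geometric_mean(sentence_scores)
document_score = geometric_mean([paragraph_score])  # single-paragraph document
print(round(document_score, 3))
```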
#### Who are the annotators?

[N/A]

### Personal and Sensitive Information

Since the corpus is mostly constructed from Common Crawl, personal and sensitive information might be present. This must be considered before training deep learning models with CATalog, especially in the case of text-generation models.

## Considerations for Using the Data

### Social Impact of Dataset

CATalog promotes the Catalan language in the NLP field, enabling the development of advanced applications and chatbots tailored to Catalan speakers, while improving access to information for better community understanding. However, most of the sources in the dataset are web-scraped, which may bring in biases and privacy issues, risking flawed outcomes and potential misuse.

Given that Catalan is a mid-resourced language with low representation in digital sources, this dataset becomes crucial for building inclusive NLP applications. It addresses the language's underrepresentation, empowering the Catalan community with improved access to text resources in their native language. However, careful consideration of potential biases and privacy issues is essential to ensure responsible and equitable technology use.

### Discussion of Biases

Web-crawled content over-represents standard language varieties, impacting language model performance for minority languages. Language diversity in data is crucial to avoid bias, especially in encoding non-standard dialects, preventing the exclusion of demographic groups. Our corpus primarily focuses on Central Catalan, but we actively include Valencian and Balearic Catalan, along with diverse sociolects from platforms like Racó Català Fòrums, aiming for a more representative dataset.

Despite legal uncertainties in web-scraped data, we prioritize permissive licenses and privacy protection measures, acknowledging the challenges posed by personally identifiable information (PII) within large-scale datasets. Our ongoing efforts aim to address privacy concerns and contribute to a more inclusive linguistic dataset.

### Other Known Limitations

[N/A]

## Additional Information

### Dataset Curators

Language Technologies Unit ([email protected]) at the Barcelona Supercomputing Center (BSC).

### Funding

This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project](https://projecteaina.cat/).

### Licensing Information

CATalog is a collection of text documents from sources with various licenses. The whole work is licensed under the most restrictive license in the corpus, which is the [Creative Commons Attribution-NonCommercial-NoDerivs 4.0 International](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.es) license. Any use of all or part of the text gathered in CATalog must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point. The list of [SPDX license identifiers](https://spdx.org/licenses/) included in the documentation can be found in the following table or in this [JSON file](https://huggingface.co/datasets/projecte-aina/CATalog/blob/main/licenses.json).

| Source | Identifier | License |
| ----------------------- | ----------------------------------- | ------------------------- |
| Tesis Doctorals en Xarxa (TDX) | tdx_ca_20220518 | [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/legalcode) |
| Wikipedia | wikipedia_ca_20230401 | [CC-BY-SA-4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) |
| IB3 | crawling-ib3_ca_20230205 | Data Sharing Agreement\* |
| Les Corts Valencianes | les-corts-valencianes_ca_20230704 | Data Sharing Agreement\* |
| Grup El Món | grup-elmon_ca_20230726 | Data Sharing Agreement\* |
| Vilaweb | vilaweb_ca_20220728 | Data Sharing Agreement\* |
| Nació Digital | naciodigital_ca_20220331 | [CC-BY-NC-ND-4.0](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode) |
| ACN | acn_ca_20201011 | Data Sharing Agreement\* |
| Racó Català Articles | racoarticles_ca_20221005 | Data Sharing Agreement\* |
| Racó Català Fòrums | racoforumsanon_ca_20211213 | Data Sharing Agreement\* |
| Wikimedia | wikimedia_ca_20230829 | [CC-BY-SA-4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) |
| Project Gutenberg | gutenberg_ca_20220224 | [Project Gutenberg ToU](https://www.gutenberg.org/policy/terms_of_use.html) |
| DOGC | dogc_ca_20230901 | Data Sharing Agreement\* |
| DOGV | dogv_ca_20231006 | Data Sharing Agreement\* |
| BOUA | boua_ca_20231006 | Data Sharing Agreement\* |
| Aquí Berguedà | aquibergueda_ca_20231009 | Data Sharing Agreement\* |
| Parlament de Catalunya | parlament_ca_20232009 | Data Sharing Agreement\* |
| CaWac | cawac_ca_20200528 | [CC-BY-SA-3.0](https://creativecommons.org/licenses/by-sa/3.0/legalcode) |
| MaCoCu | macocu_ca_20230731 | [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/legalcode) |
| Crawling populars | crawling-populars_ca_20200525 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| Colossal OSCAR 1 (03-04-23) | colossal-oscar-03-04-23_ca_20230829 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| Colossal OSCAR 1 (05-06-23) | colossal-oscar-05-06-23_ca_20230829 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| Colossal OSCAR 1 (2022-27) | colossal-oscar-2022-27_ca_20231005 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| OSCAR-2201 | oscar-2201_ca_20230904 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| OSCAR-2301 | oscar-2301_ca_20230418 | [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/legalcode) |
| mC4 | mc4_ca_20230418 | [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/legalcode) |

\* The data from each entity is governed by a distinct Data Sharing Agreement. All data provided by these entities is open and freely distributable.
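Because license terms differ per source, recovering per-document provenance matters in practice. A small sketch, relying only on the documented `id` layout (subdataset code, part number, document number; the subdataset code itself always ends in `_ca_<date>` in the table above, so this parsing rule is an assumption, not an official API):

```python
def source_identifier(doc_id: str) -> str:
    """Recover the subdataset identifier from a document id.

    Assumes ids look like "macocu_ca_20230731_9_402472", i.e. the last two
    underscore-separated fields are the part and document numbers.
    """
    return "_".join(doc_id.split("_")[:-2])

assert source_identifier("macocu_ca_20230731_9_402472") == "macocu_ca_20230731"
# The returned identifier can then be looked up in the license table or in
# the licenses.json file linked above.
```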
### Citation Information

[N/A]

### Contributions

We thank the VIVES Plan for language technologies of the Valencian community, https://vives.gplsi.es/, from the CENID Digital Intelligence Center of the University of Alicante, and the [DFKI](https://www.dfki.de/web) for their collaboration and contribution.
projecte-aina/CATalog
[ "task_categories:fill-mask", "task_categories:text-generation", "task_ids:masked-language-modeling", "task_ids:slot-filling", "task_ids:language-modeling", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10B<n<100B", "source_datasets:extended|mc4", "source_datasets:extended|oscar", "source_datasets:extended|cawac", "language:ca", "license:cc-by-nc-nd-4.0", "arxiv:2104.08758", "arxiv:2109.07445", "region:us" ]
2023-11-29T15:04:58+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["found"], "language": ["ca"], "license": ["cc-by-nc-nd-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10B<n<100B"], "source_datasets": ["extended|mc4", "extended|oscar", "extended|cawac"], "task_categories": ["fill-mask", "text-generation"], "task_ids": ["masked-language-modeling", "slot-filling", "language-modeling"], "pretty_name": "CATalog", "tags": [], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "score", "dtype": "float64"}, {"name": "strategy", "dtype": "string"}, {"name": "languages", "dtype": "string"}, {"name": "url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 115827685843, "num_examples": 34314510}], "download_size": 31532509161, "dataset_size": 115827685843}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-08T12:13:34+00:00
[ "2104.08758", "2109.07445" ]
[ "ca" ]
TAGS #task_categories-fill-mask #task_categories-text-generation #task_ids-masked-language-modeling #task_ids-slot-filling #task_ids-language-modeling #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-10B<n<100B #source_datasets-extended|mc4 #source_datasets-extended|oscar #source_datasets-extended|cawac #language-Catalan #license-cc-by-nc-nd-4.0 #arxiv-2104.08758 #arxiv-2109.07445 #region-us
Dataset Description ------------------- * Homepage Projecte AINA * Repository HuggingFace * Paper "A CURATEd CATalog: Rethinking the Extraction of Pretraining Corpora for Mid-Resourced Languages" * Leaderboard N/A * Point of Contact langtech@URL ### Dataset Summary CATalog is a diverse, open-source Catalan corpus for language modelling. It consists of text documents from 26 different sources, including web crawling, news, forums, digital libraries and public institutions, totaling 17.45 billion words. ### Supported Tasks and Leaderboards * 'Fill-Mask' * 'Text Generation' * 'other:Language-Modelling': The dataset is suitable for training a model in Language Modelling, predicting the next word in a given context. Success is measured by achieving a low Perplexity score, indicating the model's proficiency in accurately predicting subsequent words. * 'other:Masked-Language-Modelling': The dataset is designed for training models in Masked Language Modelling. This task involves predicting masked or hidden words within a sentence. Success is typically measured by achieving a high performance score, such as accuracy or F1 score, on correctly predicting the masked tokens. ### Languages This dataset is in Catalan (ca-ES). Coming from the web, some documents may contain other languages. Dataset Structure ----------------- ### Data Instances The dataset is provided in a JSONL format, where each row corresponds to a single document and contains a document identifier, the text, a quality score, the strategy used to evaluate the document quality, languages, and a URL of the document, if available. ### Data Fields * 'id': text string containing the document identifier. Consists of the subdataset code, the part number and a document number. * 'text': text string from the document, with paragraphs separated by two newline escape sequences. It is meant to be used directly as input for language modelling. * 'score': positive float number representing the document quality, ranging from 0, which represents the worst quality, to 1, the best quality. * 'strategy': text string describing the type of evaluation applied to obtain the document score. "curate" uses the heuristic evaluation from CURATE and "perfect" means that manual review was done and the highest score (1) is applied. * 'languages': dictionary containing the document languages, with a percentage indicating the character ratio for each one. * 'url': text string with the URL of the document, if available. ### Data Splits We do not provide any canonical splits for CATalog. Dataset Creation ---------------- ### Curation Rationale CATalog is mainly built on filtered, non-overlapping versions of CommonCrawl snapshots and a smaller set of manually selected corpora from specific sources. We use the CURATE pipeline, which combines exact deduplication, language identification, and scoring heuristics. In the design of CATalog, we adhere to the following values: * (1) Scale & Flexibility. We intend to produce datasets that have a significant impact on the training of multilingual models in the range of 7B-180B parameters. Since Catalan is a medium-resource language and data acquisition is already a challenge, binary filtering will limit us in terms of the amount of data. By providing a score, we are able to easily filter the corpus according to any requirement. * (2) Neutral scoring. As opposed to ML-based filtering, we use simple rules and heuristics to avoid introducing further bias into the model (Dodge et al., 2021; Welbl et al., 2021). 
We only use FastText to reject documents in other languages. During development, we performed comparative judgment experiments to evaluate the usefulness of the scoring from the CURATE pipeline, which is intended for further filtering and analysis. We found a moderate correlation between the score and the perceived quality of the text. Our main goal was to maximize the usability of the corpus without getting into a trade-off between quantity and quality. ### Source Data #### Initial Data Collection and Normalization We applied extensive data processing using our CURATE pipeline. We first filter documents by their language content using FastText. Only documents with at least 50% of characters in Catalan are kept. We then perform exact document deduplication. After this stage, we score each document with a tested set of 8 heuristic evaluators, inspired by other web filtering pipelines as well as our own designs. The following pre-existing datasets were used: * 'OSCAR-2301' * 'OSCAR-2201' * 'CaText' * 'MaCoCu-ca 1.0' * 'caWaC' * 'Colossal OSCAR 1.0' * 'mC4' #### Who are the source language producers? Apart from the pre-existing datasets, all of them coming from CommonCrawl dumps, the following sources provided their data under Open Data Agreements: * Media Groups ------------ * 'IB3' * 'Grup El Món' * 'Vilaweb' * 'Nació Digital' * 'ACN' * 'Racó Català Articles' * 'Racó Català Fòrums (anonymized version)' * 'Aquí Berguedà' * Academic & Book Repositories ---------------------------- * 'Tesis Doctorals en Xarxa (TDX)' * 'Wikipedia' * 'Project Gutenberg' * Government Institutions ----------------------- * 'Parlament de Catalunya' * 'Les Corts Valencianes' * 'Diari Oficial de la Generalitat Valenciana' * 'Butlletí Oficial de la Universitat d'Alacant' ### Annotations The score is an automatic label obtained from the aggregation of different heuristic evaluators based on predefined thresholds. Specific evaluators penalize documents for factors like minimum word count, average word per sentence, punctuation per word rate, unique sentences ratio, stopword ratio, Brunet index, language diversity, and content identified by regular expressions, providing a comprehensive approach to document scoring. #### Annotation process The process involves assigning scores between 0 and 1 to sentences, paragraphs, and documents in a hierarchical manner. Individual evaluators at different levels contribute scores that are combined using geometric means, emphasizing a probability-like interpretation to encourage evaluators to assess desirability. The final document score is derived through analogous aggregation of paragraph and document scores, distinct from a linear model. #### Who are the annotators? [N/A] ### Personal and Sensitive Information Being mostly constructed from Common Crawl, personal and sensitive information might be present. This must be considered before training deep learning models with CATalog, especially in the case of text-generation models. Considerations for Using the Data --------------------------------- ### Social Impact of Dataset CATalog promotes the Catalan language in the NLP field, enabling development of advanced applications and chatbots tailored to Catalan speakers, while improving access to information for better community understanding. However, most of the sources in the dataset are web-scraped, which may bring in biases and privacy issues, risking flawed outcomes and potential misuse. 
Given that Catalan is a mid-resourced language with low representation in digital sources, this dataset becomes crucial for building inclusive NLP applications. It addresses the language's underrepresentation, empowering the Catalan community with improved access to text resources in their native language. However, careful consideration of potential biases and privacy issues is essential to ensure responsible and equitable technology use. ### Discussion of Biases Web-crawled content is over-represented with standard language varieties, impacting language model performance for minority languages. Language diversity in data is crucial to avoid bias, especially in encoding non-standard dialects, preventing the exclusion of demographic groups. Our corpus primarily focuses on Central Catalan, but we actively include Valencian and Balearic Catalan, along with diverse sociolects from platforms like Racó Català Fòrums, aiming for a more representative dataset. Despite legal uncertainties in web-scraped data, we prioritize permissive licenses and privacy protection measures, acknowledging the challenges posed by personally identifiable information (PII) within large-scale datasets. Our ongoing efforts aim to address privacy concerns and contribute to a more inclusive linguistic dataset. ### Other Known Limitations [N/A] Additional Information ---------------------- ### Dataset Curators Language Technologies Unit (langtech@URL) at the Barcelona Supercomputing Center (BSC). ### Funding This work has been promoted and financed by the Generalitat de Catalunya through the Aina project. ### Licensing Information CATalog is a collection of text documents from sources with various licenses. The whole work is licensed under the most restrictive license in the corpus, which is Creative Commons Attribution-NonCommercial-NoDerivs 4.0 International license. Any use of all or part of the text gathered in CATalog must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point. The list of SPDX license identifiers included in the documentation can be found in the following table or in this JSON file. 
Source: Tesis Doctorales en Xarxa (TDX), Identifier: tdx\_ca\_20220518, License: CC-BY-4.0 Source: Wikipedia, Identifier: wikipedia\_ca\_20230401, License: CC-BY-SA-4.0 Source: IB3, Identifier: crawling-ib3\_ca\_20230205, License: Data Sharing Agreement\* Source: Les Corts Valencianes, Identifier: les-corts-valencianes\_ca\_20230704, License: Data Sharing Agreement\* Source: Grup El Món, Identifier: grup-elmon\_ca\_20230726, License: Data Sharing Agreement\* Source: Vilaweb, Identifier: vilaweb\_ca\_20220728, License: Data Sharing Agreement\* Source: Nació Digital, Identifier: naciodigital\_ca\_20220331, License: CC-BY-NC-ND-4.0 Source: ACN, Identifier: acn\_ca\_20201011, License: Data Sharing Agreement\* Source: Racó Català Articles, Identifier: racoarticles\_ca\_20221005, License: Data Sharing Agreement\* Source: Racó Català Fòrums, Identifier: racoforumsanon\_ca\_20211213, License: Data Sharing Agreement\* Source: Wikimedia, Identifier: wikimedia\_ca\_20230829, License: CC-BY-SA-4.0 Source: Project Gutenberg, Identifier: gutenberg\_ca\_20220224, License: Project Gutenberg ToU Source: DOGC, Identifier: dogc\_ca\_20230901, License: Data Sharing Agreement\* Source: DOGV, Identifier: dogv\_ca\_20231006, License: Data Sharing Agreement\* Source: BOUA, Identifier: boua\_ca\_20231006, License: Data Sharing Agreement\* Source: Aquí Berguedà, Identifier: aquibergueda\_ca\_20231009, License: Data Sharing Agreement\* Source: Parlament de Catalunya, Identifier: parlament\_ca\_20232009, License: Data Sharing Agreement\* Source: CaWac, Identifier: cawac\_ca\_20200528, License: CC-BY-SA-3.0 Source: MaCoCu, Identifier: macocu\_ca\_20230731, License: CC-BY-4.0 Source: Crawling populars, Identifier: crawling-populars\_ca\_20200525, License: CC0-1.0 Source: Colossal OSCAR 1 (03-04-23), Identifier: colossal-oscar-03-04-23\_ca\_20230829, License: CC0-1.0 Source: Colossal OSCAR 1 (05-06-23), Identifier: colossal-oscar-05-06-23\_ca\_20230829, License: CC0-1.0 Source: Colossal OSCAR 1 (2022-27), Identifier: colossal-oscar-2022-27\_ca\_20231005, License: CC0-1.0 Source: OSCAR-2201, Identifier: oscar-2201\_ca\_20230904, License: CC0-1.0 Source: OSCAR-2301, Identifier: oscar-2301\_ca\_20230418, License: CC0-1.0 Source: mC4, Identifier: mc4\_ca\_20230418, License: CC-BY-4.0 \* The data from each entity is governed by a distinct Data Sharing Agreement. All data provided by these entities is open and freely distributable. [N/A] ### Contributions We thank the VIVES Plan for language technologies of the Valencian community, URL from the CENID Digital Intelligence Center of the University of Alicante and the DFKI for their collaboration and contribution.
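The score-based filtering described in the Data Fields section can be exercised with a few lines of code. The sketch below is illustrative only: it assumes the corpus is published on the Hugging Face Hub under the identifier `projecte-aina/CATalog` with a `train` split (verify both on the repository page), and it streams the data so the full corpus is never downloaded at once.

```python
from datasets import load_dataset

# Stream CATalog and keep only documents above a quality threshold.
# The dataset id and split name are assumptions; check the Hub repository.
ds = load_dataset("projecte-aina/CATalog", split="train", streaming=True)

MIN_SCORE = 0.8  # arbitrary cut-off; raise it for a smaller, cleaner subset

high_quality = ds.filter(lambda doc: doc["score"] >= MIN_SCORE)

for doc in high_quality.take(3):
    print(doc["id"], doc["strategy"], round(doc["score"], 3))
```

Because the score is kept alongside every document instead of being consumed by a one-off binary filter, different thresholds can be compared later without regenerating the corpus, which is exactly the flexibility argued for in the Curation Rationale.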
[ "### Dataset Summary\n\n\nCATalog is a diverse, open-source Catalan corpus for language modelling. It consists of text documents from 26 different sources, including web crawling, news, forums, digital libraries and public institutions, totaling in 17.45 billion words.", "### Supported Tasks and Leaderboards\n\n\n* 'Fill-Mask'\n* 'Text Generation'\n* 'other:Language-Modelling': The dataset is suitable for training a model in Language Modelling, predicting the next word in a given context. Success is measured by achieving a low Perplexity score, indicating the model's proficiency in accurately predicting subsequent words.\n* 'other:Masked-Language-Modelling': The dataset is designed for training models in Masked Language Modelling. This task involves predicting masked or hidden words within a sentence. Success is typically measured by achieving a high performance score, such as accuracy or F1 score, on correctly predicting the masked tokens.", "### Languages\n\n\nThis dataset is in Catalan (ca-ES). Coming from the web, some documents may contain other languages.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nThe dataset is provided in a JSONL format, where each row corresponds to a single document and contains a document identifier, the text, a quality score, the strategy used to evaluate the document quality, languages, and a URL of the document, if available.", "### Data Fields\n\n\n* 'id': text string containing the document identifier. Consists of the subdataset code, the part number and a document number.\n* 'text': text string from the document, with paragraphs separated by two newlines escape sequences. It is meant to be used directly as input for language modelling.\n* 'score': positive float number representing the document quality, ranging from 0, which represents the worst quality, to 1, the best quality.\n* 'strategy': text string describing the type of evaluation applied to obtain the document score. \"curate\" uses the heuristic evaluation from CURATE and \"perfect\" means that manual review was done and the highest score (1) is applied.\n* 'languages': dictionary containing the document languages, with a percentage indicating the character ratio for each one.\n* 'url': text string with the URL of the document, if available.", "### Data Splits\n\n\nWe do not provide any canonical splits for CATalog.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nCATalog is mainly built on filtered, non-overlapping versions of CommonCrawl snapshots and a smaller set of manually selected corpora from specific sources. We use the CURATE pipeline, which combines exact deduplication, language identification, and scoring heuristics.\n\n\nIn the design of CATalog, we adhere to the following values:\n\n\n* (1) Scale & Flexibility. We intend to produce datasets that have a significant impact on the training of multilingual models in the range of 7B-180B parameters. Since Catalan is a medium-resource language and data acquisition is already a challenge, binary filtering will limit us in terms of the amount of data. By providing a score, we are able to easily filter the corpus according to any requirement.\n* (2) Neutral scoring. As opposed to ML-based filtering, we use simple rules and heuristics to avoid introducing further bias into the model (Dodge et al., 2021; Welbl et al., 2021). 
We only use FastText to reject documents in other languages.\n\n\nDuring development, we performed comparative judgment experiments to evaluate the usefulness of the scoring from the CURATE pipeline, which is intended for further filtering and analysis. We found a moderate correlation between the score and the perceived quality of the text. Our main goal was to maximize the usability of the corpus without getting into a trade-off between quantity and quality.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nWe applied extensive data processing using our CURATE pipeline.\n\n\nWe first filter documents by their language content using FastText. Only documents with at least 50% of characters in Catalan are kept. We then perform exact document deduplication. After this stage, we score each document with a tested set of 8 heuristic evaluators, inspired from other web filterings and from our own creation.\n\n\nThe following pre-existing datasets were used:\n\n\n* 'OSCAR-2301'\n* 'OSCAR-2201'\n* 'CaText'\n* 'MaCoCu-ca 1.0'\n* 'caWaC'\n* 'Colossal OSCAR 1.0'\n* 'mC4'", "#### Who are the source language producers?\n\n\nApart from the pre-existing datasets, all of them coming from CommonCrawl dumps, the following\nsources provided their data on Open Data Agreements:\n\n\n* Media Groups\n------------\n* 'IB3'\n* 'Grup El Món'\n* 'Vilaweb'\n* 'Nació Digital'\n* 'ACN'\n* 'Racó Català Articles'\n* 'Racó Català Fòrums (anonymized version)'\n* 'Aquí Berguedà'\n* Academic & Book Repositories\n----------------------------\n* 'Tesis Doctorals en Xarxa (TDX)'\n* 'Wikipedia'\n* 'Project Gutenberg'\n* Government Institutions\n-----------------------\n* 'Parlament de Catalunya'\n* 'Les Corts Valencianes'\n* 'Diari Oficial de la Generalitat Valenciana'\n* 'Butlletí Oficial de la Universitat d'Alacant'", "### Annotations\n\n\nThe score is an automatic label obtained from the aggregation of different heuristic evaluators based on predefined thresholds. Specific evaluators penalize documents for factors like minimum word count, average word per sentence, punctuation per word rate, unique sentences ratio, stopword ratio, Brunet index, language diversity, and content identified by regular expressions, providing a comprehensive approach to document scoring.", "#### Annotation process\n\n\nThe process involves assigning scores between 0 and 1 to sentences, paragraphs, and documents in a hierarchical manner. Individual evaluators at different levels contribute scores that are combined using geometric means, emphasizing a probability-like interpretation to encourage evaluators to assess desirability. The final document score is derived through analogous aggregation of paragraph and document scores, distinct from a linear model.", "#### Who are the annotators?\n\n\n[N/A]", "### Personal and Sensitive Information\n\n\nBeing mostly constructed from Common Crawl, personal and sensitive information might be present.\nThis must be considered before training deep learning models with CATalog, specially in the case of text-generation models.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nCATalog promotes the Catalan language in the NLP field, enabling development of advanced applications and chatbots tailored to Catalan speakers, while improving access to information for better community understanding. 
However, most of the sources in the dataset are web-scraped, which may bring in biases and privacy issues, risking flawed outcomes and potential misuse.\n\n\nGiven that Catalan is a mid-resourced language with low representation in digital sources, this dataset becomes crucial for building inclusive NLP applications. It addresses the language's underrepresentation, empowering the Catalan community with improved access to text resources in their native language. However, careful consideration of potential biases and privacy issues is essential to ensure responsible and equitable technology use.", "### Discussion of Biases\n\n\nWeb-crawled content is over-represented with standard language varieties, impacting language model performance for minority languages. Language diversity in data is crucial to avoid bias, especially in encoding non-standard dialects, preventing the exclusion of demographic groups. Our corpus primarily focuses on Central Catalan, but we actively include Valencian and Balearic Catalan, along with diverse sociolects from platforms like Racó Català Fòrums, aiming for a more representative dataset. Despite legal uncertainties in web-scraped data, we prioritize permissive licenses and privacy protection measures, acknowledging the challenges posed by personally identifiable information (PII) within large-scale datasets. Our ongoing efforts aim to address privacy concerns and contribute to a more inclusive linguistic dataset.", "### Other Known Limitations\n\n\n[N/A]\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nLanguage Technologies Unit (langtech@URL) at the Barcelona Supercomputing Center (BSC).", "### Funding\n\n\nThis work has been promoted and financed by the Generalitat de Catalunya through the Aina project.", "### Licensing Information\n\n\nCATalog is a collection of text documents from sources with various licenses. The whole work is licensed under the most restrictive license in the corpus, which is Creative Commons Attribution-NonCommercial-NoDerivs 4.0 International license. Any use of all or part of the text gathered in CATalog must abide by the terms of the original licenses, including attribution clauses when relevant. 
We facilitate this by providing provenance information for each data point.\n\n\nThe list of SPDX license identifiers included in the documentation can be found in the following table or in this JSON file.\n\n\nSource: Tesis Doctorales en Xarxa (TDX), Identifier: tdx\\_ca\\_20220518, License: CC-BY-4.0\nSource: Wikipedia, Identifier: wikipedia\\_ca\\_20230401, License: CC-BY-SA-4.0\nSource: IB3, Identifier: crawling-ib3\\_ca\\_20230205, License: Data Sharing Agreement\\*\nSource: Les Corts Valencianes, Identifier: les-corts-valencianes\\_ca\\_20230704, License: Data Sharing Agreement\\*\nSource: Grup El Món, Identifier: grup-elmon\\_ca\\_20230726, License: Data Sharing Agreement\\*\nSource: Vilaweb, Identifier: vilaweb\\_ca\\_20220728, License: Data Sharing Agreement\\*\nSource: Nació Digital, Identifier: naciodigital\\_ca\\_20220331, License: CC-BY-NC-ND-4.0\nSource: ACN, Identifier: acn\\_ca\\_20201011, License: Data Sharing Agreement\\*\nSource: Racó Català Articles, Identifier: racoarticles\\_ca\\_20221005, License: Data Sharing Agreement\\*\nSource: Racó Català Fòrums, Identifier: racoforumsanon\\_ca\\_20211213, License: Data Sharing Agreement\\*\nSource: Wikimedia, Identifier: wikimedia\\_ca\\_20230829, License: CC-BY-SA-4.0\nSource: Project Gutenberg, Identifier: gutenberg\\_ca\\_20220224, License: Project Gutenberg ToU\nSource: DOGC, Identifier: dogc\\_ca\\_20230901, License: Data Sharing Agreement\\*\nSource: DOGV, Identifier: dogv\\_ca\\_20231006, License: Data Sharing Agreement\\*\nSource: BOUA, Identifier: boua\\_ca\\_20231006, License: Data Sharing Agreement\\*\nSource: Aquí Berguedà, Identifier: aquibergueda\\_ca\\_20231009, License: Data Sharing Agreement\\*\nSource: Parlament de Catalunya, Identifier: parlament\\_ca\\_20232009, License: Data Sharing Agreement\\*\nSource: CaWac, Identifier: cawac\\_ca\\_20200528, License: CC-BY-SA-3.0\nSource: MaCoCu, Identifier: macocu\\_ca\\_20230731, License: CC-BY-4.0\nSource: Crawling populars, Identifier: crawling-populars\\_ca\\_20200525, License: CC0-1.0\nSource: Colossal OSCAR 1 (03-04-23), Identifier: colossal-oscar-03-04-23\\_ca\\_20230829, License: CC0-1.0\nSource: Colossal OSCAR 1 (05-06-23), Identifier: colossal-oscar-05-06-23\\_ca\\_20230829, License: CC0-1.0\nSource: Colossal OSCAR 1 (2022-27), Identifier: colossal-oscar-2022-27\\_ca\\_20231005, License: CC0-1.0\nSource: OSCAR-2201, Identifier: oscar-2201\\_ca\\_20230904, License: CC0-1.0\nSource: OSCAR-2301, Identifier: oscar-2301\\_ca\\_20230418, License: CC0-1.0\nSource: mC4, Identifier: mc4\\_ca\\_20230418, License: CC-BY-4.0\n\n\n\\* The data from each entity is governed by a distinct Data Sharing Agreement. All data provided by these entities is open and freely distributable.\n\n\n[N/A]", "### Contributions\n\n\nWe thank the VIVES Plan for language technologies of the Valencian community, URL from the CENID Digital Intelligence Center of the University of Alicante and the DFKI for their collaboration and contribution." ]
[ "TAGS\n#task_categories-fill-mask #task_categories-text-generation #task_ids-masked-language-modeling #task_ids-slot-filling #task_ids-language-modeling #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-10B<n<100B #source_datasets-extended|mc4 #source_datasets-extended|oscar #source_datasets-extended|cawac #language-Catalan #license-cc-by-nc-nd-4.0 #arxiv-2104.08758 #arxiv-2109.07445 #region-us \n", "### Dataset Summary\n\n\nCATalog is a diverse, open-source Catalan corpus for language modelling. It consists of text documents from 26 different sources, including web crawling, news, forums, digital libraries and public institutions, totaling in 17.45 billion words.", "### Supported Tasks and Leaderboards\n\n\n* 'Fill-Mask'\n* 'Text Generation'\n* 'other:Language-Modelling': The dataset is suitable for training a model in Language Modelling, predicting the next word in a given context. Success is measured by achieving a low Perplexity score, indicating the model's proficiency in accurately predicting subsequent words.\n* 'other:Masked-Language-Modelling': The dataset is designed for training models in Masked Language Modelling. This task involves predicting masked or hidden words within a sentence. Success is typically measured by achieving a high performance score, such as accuracy or F1 score, on correctly predicting the masked tokens.", "### Languages\n\n\nThis dataset is in Catalan (ca-ES). Coming from the web, some documents may contain other languages.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nThe dataset is provided in a JSONL format, where each row corresponds to a single document and contains a document identifier, the text, a quality score, the strategy used to evaluate the document quality, languages, and a URL of the document, if available.", "### Data Fields\n\n\n* 'id': text string containing the document identifier. Consists of the subdataset code, the part number and a document number.\n* 'text': text string from the document, with paragraphs separated by two newlines escape sequences. It is meant to be used directly as input for language modelling.\n* 'score': positive float number representing the document quality, ranging from 0, which represents the worst quality, to 1, the best quality.\n* 'strategy': text string describing the type of evaluation applied to obtain the document score. \"curate\" uses the heuristic evaluation from CURATE and \"perfect\" means that manual review was done and the highest score (1) is applied.\n* 'languages': dictionary containing the document languages, with a percentage indicating the character ratio for each one.\n* 'url': text string with the URL of the document, if available.", "### Data Splits\n\n\nWe do not provide any canonical splits for CATalog.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nCATalog is mainly built on filtered, non-overlapping versions of CommonCrawl snapshots and a smaller set of manually selected corpora from specific sources. We use the CURATE pipeline, which combines exact deduplication, language identification, and scoring heuristics.\n\n\nIn the design of CATalog, we adhere to the following values:\n\n\n* (1) Scale & Flexibility. We intend to produce datasets that have a significant impact on the training of multilingual models in the range of 7B-180B parameters. 
Since Catalan is a medium-resource language and data acquisition is already a challenge, binary filtering will limit us in terms of the amount of data. By providing a score, we are able to easily filter the corpus according to any requirement.\n* (2) Neutral scoring. As opposed to ML-based filtering, we use simple rules and heuristics to avoid introducing further bias into the model (Dodge et al., 2021; Welbl et al., 2021). We only use FastText to reject documents in other languages.\n\n\nDuring development, we performed comparative judgment experiments to evaluate the usefulness of the scoring from the CURATE pipeline, which is intended for further filtering and analysis. We found a moderate correlation between the score and the perceived quality of the text. Our main goal was to maximize the usability of the corpus without getting into a trade-off between quantity and quality.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nWe applied extensive data processing using our CURATE pipeline.\n\n\nWe first filter documents by their language content using FastText. Only documents with at least 50% of characters in Catalan are kept. We then perform exact document deduplication. After this stage, we score each document with a tested set of 8 heuristic evaluators, inspired from other web filterings and from our own creation.\n\n\nThe following pre-existing datasets were used:\n\n\n* 'OSCAR-2301'\n* 'OSCAR-2201'\n* 'CaText'\n* 'MaCoCu-ca 1.0'\n* 'caWaC'\n* 'Colossal OSCAR 1.0'\n* 'mC4'", "#### Who are the source language producers?\n\n\nApart from the pre-existing datasets, all of them coming from CommonCrawl dumps, the following\nsources provided their data on Open Data Agreements:\n\n\n* Media Groups\n------------\n* 'IB3'\n* 'Grup El Món'\n* 'Vilaweb'\n* 'Nació Digital'\n* 'ACN'\n* 'Racó Català Articles'\n* 'Racó Català Fòrums (anonymized version)'\n* 'Aquí Berguedà'\n* Academic & Book Repositories\n----------------------------\n* 'Tesis Doctorals en Xarxa (TDX)'\n* 'Wikipedia'\n* 'Project Gutenberg'\n* Government Institutions\n-----------------------\n* 'Parlament de Catalunya'\n* 'Les Corts Valencianes'\n* 'Diari Oficial de la Generalitat Valenciana'\n* 'Butlletí Oficial de la Universitat d'Alacant'", "### Annotations\n\n\nThe score is an automatic label obtained from the aggregation of different heuristic evaluators based on predefined thresholds. Specific evaluators penalize documents for factors like minimum word count, average word per sentence, punctuation per word rate, unique sentences ratio, stopword ratio, Brunet index, language diversity, and content identified by regular expressions, providing a comprehensive approach to document scoring.", "#### Annotation process\n\n\nThe process involves assigning scores between 0 and 1 to sentences, paragraphs, and documents in a hierarchical manner. Individual evaluators at different levels contribute scores that are combined using geometric means, emphasizing a probability-like interpretation to encourage evaluators to assess desirability. 
The final document score is derived through analogous aggregation of paragraph and document scores, distinct from a linear model.", "#### Who are the annotators?\n\n\n[N/A]", "### Personal and Sensitive Information\n\n\nBeing mostly constructed from Common Crawl, personal and sensitive information might be present.\nThis must be considered before training deep learning models with CATalog, specially in the case of text-generation models.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nCATalog promotes the Catalan language in the NLP field, enabling development of advanced applications and chatbots tailored to Catalan speakers, while improving access to information for better community understanding. However, most of the sources in the dataset are web-scraped, which may bring in biases and privacy issues, risking flawed outcomes and potential misuse.\n\n\nGiven that Catalan is a mid-resourced language with low representation in digital sources, this dataset becomes crucial for building inclusive NLP applications. It addresses the language's underrepresentation, empowering the Catalan community with improved access to text resources in their native language. However, careful consideration of potential biases and privacy issues is essential to ensure responsible and equitable technology use.", "### Discussion of Biases\n\n\nWeb-crawled content is over-represented with standard language varieties, impacting language model performance for minority languages. Language diversity in data is crucial to avoid bias, especially in encoding non-standard dialects, preventing the exclusion of demographic groups. Our corpus primarily focuses on Central Catalan, but we actively include Valencian and Balearic Catalan, along with diverse sociolects from platforms like Racó Català Fòrums, aiming for a more representative dataset. Despite legal uncertainties in web-scraped data, we prioritize permissive licenses and privacy protection measures, acknowledging the challenges posed by personally identifiable information (PII) within large-scale datasets. Our ongoing efforts aim to address privacy concerns and contribute to a more inclusive linguistic dataset.", "### Other Known Limitations\n\n\n[N/A]\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nLanguage Technologies Unit (langtech@URL) at the Barcelona Supercomputing Center (BSC).", "### Funding\n\n\nThis work has been promoted and financed by the Generalitat de Catalunya through the Aina project.", "### Licensing Information\n\n\nCATalog is a collection of text documents from sources with various licenses. The whole work is licensed under the most restrictive license in the corpus, which is Creative Commons Attribution-NonCommercial-NoDerivs 4.0 International license. Any use of all or part of the text gathered in CATalog must abide by the terms of the original licenses, including attribution clauses when relevant. 
We facilitate this by providing provenance information for each data point.\n\n\nThe list of SPDX license identifiers included in the documentation can be found in the following table or in this JSON file.\n\n\nSource: Tesis Doctorales en Xarxa (TDX), Identifier: tdx\\_ca\\_20220518, License: CC-BY-4.0\nSource: Wikipedia, Identifier: wikipedia\\_ca\\_20230401, License: CC-BY-SA-4.0\nSource: IB3, Identifier: crawling-ib3\\_ca\\_20230205, License: Data Sharing Agreement\\*\nSource: Les Corts Valencianes, Identifier: les-corts-valencianes\\_ca\\_20230704, License: Data Sharing Agreement\\*\nSource: Grup El Món, Identifier: grup-elmon\\_ca\\_20230726, License: Data Sharing Agreement\\*\nSource: Vilaweb, Identifier: vilaweb\\_ca\\_20220728, License: Data Sharing Agreement\\*\nSource: Nació Digital, Identifier: naciodigital\\_ca\\_20220331, License: CC-BY-NC-ND-4.0\nSource: ACN, Identifier: acn\\_ca\\_20201011, License: Data Sharing Agreement\\*\nSource: Racó Català Articles, Identifier: racoarticles\\_ca\\_20221005, License: Data Sharing Agreement\\*\nSource: Racó Català Fòrums, Identifier: racoforumsanon\\_ca\\_20211213, License: Data Sharing Agreement\\*\nSource: Wikimedia, Identifier: wikimedia\\_ca\\_20230829, License: CC-BY-SA-4.0\nSource: Project Gutenberg, Identifier: gutenberg\\_ca\\_20220224, License: Project Gutenberg ToU\nSource: DOGC, Identifier: dogc\\_ca\\_20230901, License: Data Sharing Agreement\\*\nSource: DOGV, Identifier: dogv\\_ca\\_20231006, License: Data Sharing Agreement\\*\nSource: BOUA, Identifier: boua\\_ca\\_20231006, License: Data Sharing Agreement\\*\nSource: Aquí Berguedà, Identifier: aquibergueda\\_ca\\_20231009, License: Data Sharing Agreement\\*\nSource: Parlament de Catalunya, Identifier: parlament\\_ca\\_20232009, License: Data Sharing Agreement\\*\nSource: CaWac, Identifier: cawac\\_ca\\_20200528, License: CC-BY-SA-3.0\nSource: MaCoCu, Identifier: macocu\\_ca\\_20230731, License: CC-BY-4.0\nSource: Crawling populars, Identifier: crawling-populars\\_ca\\_20200525, License: CC0-1.0\nSource: Colossal OSCAR 1 (03-04-23), Identifier: colossal-oscar-03-04-23\\_ca\\_20230829, License: CC0-1.0\nSource: Colossal OSCAR 1 (05-06-23), Identifier: colossal-oscar-05-06-23\\_ca\\_20230829, License: CC0-1.0\nSource: Colossal OSCAR 1 (2022-27), Identifier: colossal-oscar-2022-27\\_ca\\_20231005, License: CC0-1.0\nSource: OSCAR-2201, Identifier: oscar-2201\\_ca\\_20230904, License: CC0-1.0\nSource: OSCAR-2301, Identifier: oscar-2301\\_ca\\_20230418, License: CC0-1.0\nSource: mC4, Identifier: mc4\\_ca\\_20230418, License: CC-BY-4.0\n\n\n\\* The data from each entity is governed by a distinct Data Sharing Agreement. All data provided by these entities is open and freely distributable.\n\n\n[N/A]", "### Contributions\n\n\nWe thank the VIVES Plan for language technologies of the Valencian community, URL from the CENID Digital Intelligence Center of the University of Alicante and the DFKI for their collaboration and contribution." ]
[ 180, 60, 171, 36, 67, 210, 25, 334, 4, 154, 191, 95, 96, 14, 62, 177, 195, 19, 28, 23, 977, 44 ]
[ "passage: TAGS\n#task_categories-fill-mask #task_categories-text-generation #task_ids-masked-language-modeling #task_ids-slot-filling #task_ids-language-modeling #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-10B<n<100B #source_datasets-extended|mc4 #source_datasets-extended|oscar #source_datasets-extended|cawac #language-Catalan #license-cc-by-nc-nd-4.0 #arxiv-2104.08758 #arxiv-2109.07445 #region-us \n### Dataset Summary\n\n\nCATalog is a diverse, open-source Catalan corpus for language modelling. It consists of text documents from 26 different sources, including web crawling, news, forums, digital libraries and public institutions, totaling in 17.45 billion words.### Supported Tasks and Leaderboards\n\n\n* 'Fill-Mask'\n* 'Text Generation'\n* 'other:Language-Modelling': The dataset is suitable for training a model in Language Modelling, predicting the next word in a given context. Success is measured by achieving a low Perplexity score, indicating the model's proficiency in accurately predicting subsequent words.\n* 'other:Masked-Language-Modelling': The dataset is designed for training models in Masked Language Modelling. This task involves predicting masked or hidden words within a sentence. Success is typically measured by achieving a high performance score, such as accuracy or F1 score, on correctly predicting the masked tokens.### Languages\n\n\nThis dataset is in Catalan (ca-ES). Coming from the web, some documents may contain other languages.\n\n\nDataset Structure\n-----------------", "passage: ### Data Instances\n\n\nThe dataset is provided in a JSONL format, where each row corresponds to a single document and contains a document identifier, the text, a quality score, the strategy used to evaluate the document quality, languages, and a URL of the document, if available.### Data Fields\n\n\n* 'id': text string containing the document identifier. Consists of the subdataset code, the part number and a document number.\n* 'text': text string from the document, with paragraphs separated by two newlines escape sequences. It is meant to be used directly as input for language modelling.\n* 'score': positive float number representing the document quality, ranging from 0, which represents the worst quality, to 1, the best quality.\n* 'strategy': text string describing the type of evaluation applied to obtain the document score. \"curate\" uses the heuristic evaluation from CURATE and \"perfect\" means that manual review was done and the highest score (1) is applied.\n* 'languages': dictionary containing the document languages, with a percentage indicating the character ratio for each one.\n* 'url': text string with the URL of the document, if available.### Data Splits\n\n\nWe do not provide any canonical splits for CATalog.\n\n\nDataset Creation\n----------------", "passage: ### Curation Rationale\n\n\nCATalog is mainly built on filtered, non-overlapping versions of CommonCrawl snapshots and a smaller set of manually selected corpora from specific sources. We use the CURATE pipeline, which combines exact deduplication, language identification, and scoring heuristics.\n\n\nIn the design of CATalog, we adhere to the following values:\n\n\n* (1) Scale & Flexibility. We intend to produce datasets that have a significant impact on the training of multilingual models in the range of 7B-180B parameters. 
Since Catalan is a medium-resource language and data acquisition is already a challenge, binary filtering will limit us in terms of the amount of data. By providing a score, we are able to easily filter the corpus according to any requirement.\n* (2) Neutral scoring. As opposed to ML-based filtering, we use simple rules and heuristics to avoid introducing further bias into the model (Dodge et al., 2021; Welbl et al., 2021). We only use FastText to reject documents in other languages.\n\n\nDuring development, we performed comparative judgment experiments to evaluate the usefulness of the scoring from the CURATE pipeline, which is intended for further filtering and analysis. We found a moderate correlation between the score and the perceived quality of the text. Our main goal was to maximize the usability of the corpus without getting into a trade-off between quantity and quality.### Source Data#### Initial Data Collection and Normalization\n\n\nWe applied extensive data processing using our CURATE pipeline.\n\n\nWe first filter documents by their language content using FastText. Only documents with at least 50% of characters in Catalan are kept. We then perform exact document deduplication. After this stage, we score each document with a tested set of 8 heuristic evaluators, inspired from other web filterings and from our own creation.\n\n\nThe following pre-existing datasets were used:\n\n\n* 'OSCAR-2301'\n* 'OSCAR-2201'\n* 'CaText'\n* 'MaCoCu-ca 1.0'\n* 'caWaC'\n* 'Colossal OSCAR 1.0'\n* 'mC4'#### Who are the source language producers?\n\n\nApart from the pre-existing datasets, all of them coming from CommonCrawl dumps, the following\nsources provided their data on Open Data Agreements:\n\n\n* Media Groups\n------------\n* 'IB3'\n* 'Grup El Món'\n* 'Vilaweb'\n* 'Nació Digital'\n* 'ACN'\n* 'Racó Català Articles'\n* 'Racó Català Fòrums (anonymized version)'\n* 'Aquí Berguedà'\n* Academic & Book Repositories\n----------------------------\n* 'Tesis Doctorals en Xarxa (TDX)'\n* 'Wikipedia'\n* 'Project Gutenberg'\n* Government Institutions\n-----------------------\n* 'Parlament de Catalunya'\n* 'Les Corts Valencianes'\n* 'Diari Oficial de la Generalitat Valenciana'\n* 'Butlletí Oficial de la Universitat d'Alacant'### Annotations\n\n\nThe score is an automatic label obtained from the aggregation of different heuristic evaluators based on predefined thresholds. Specific evaluators penalize documents for factors like minimum word count, average word per sentence, punctuation per word rate, unique sentences ratio, stopword ratio, Brunet index, language diversity, and content identified by regular expressions, providing a comprehensive approach to document scoring.", "passage: #### Annotation process\n\n\nThe process involves assigning scores between 0 and 1 to sentences, paragraphs, and documents in a hierarchical manner. Individual evaluators at different levels contribute scores that are combined using geometric means, emphasizing a probability-like interpretation to encourage evaluators to assess desirability. 
The final document score is derived through analogous aggregation of paragraph and document scores, distinct from a linear model.#### Who are the annotators?\n\n\n[N/A]### Personal and Sensitive Information\n\n\nBeing mostly constructed from Common Crawl, personal and sensitive information might be present.\nThis must be considered before training deep learning models with CATalog, specially in the case of text-generation models.\n\n\nConsiderations for Using the Data\n---------------------------------### Social Impact of Dataset\n\n\nCATalog promotes the Catalan language in the NLP field, enabling development of advanced applications and chatbots tailored to Catalan speakers, while improving access to information for better community understanding. However, most of the sources in the dataset are web-scraped, which may bring in biases and privacy issues, risking flawed outcomes and potential misuse.\n\n\nGiven that Catalan is a mid-resourced language with low representation in digital sources, this dataset becomes crucial for building inclusive NLP applications. It addresses the language's underrepresentation, empowering the Catalan community with improved access to text resources in their native language. However, careful consideration of potential biases and privacy issues is essential to ensure responsible and equitable technology use.### Discussion of Biases\n\n\nWeb-crawled content is over-represented with standard language varieties, impacting language model performance for minority languages. Language diversity in data is crucial to avoid bias, especially in encoding non-standard dialects, preventing the exclusion of demographic groups. Our corpus primarily focuses on Central Catalan, but we actively include Valencian and Balearic Catalan, along with diverse sociolects from platforms like Racó Català Fòrums, aiming for a more representative dataset. Despite legal uncertainties in web-scraped data, we prioritize permissive licenses and privacy protection measures, acknowledging the challenges posed by personally identifiable information (PII) within large-scale datasets. Our ongoing efforts aim to address privacy concerns and contribute to a more inclusive linguistic dataset.### Other Known Limitations\n\n\n[N/A]\n\n\nAdditional Information\n----------------------### Dataset Curators\n\n\nLanguage Technologies Unit (langtech@URL) at the Barcelona Supercomputing Center (BSC).", "passage: ### Funding\n\n\nThis work has been promoted and financed by the Generalitat de Catalunya through the Aina project." ]
7bf308cf5b27aec0e3184cba4568ec96bf2db528
This is an evaluation dataset for instruction tuning. It consists of 3,000 examples sampled at random from [kunishou/hh-rlhf-49k-ja](https://huggingface.co/datasets/kunishou/hh-rlhf-49k-ja/tree/main).
By always pointing to this evaluation dataset when instruction-tuning on any dataset, you can compare validation losses side by side across runs.

When tuning on the kunishou/hh-rlhf-49k-ja dataset, please use the code below to exclude this evaluation set from the training data.

```
pip install datasets pandas
```

```
import pandas as pd
from datasets import Dataset, load_dataset

df1 = load_dataset("kunishou/hh-rlhf-49k-ja")
df2 = load_dataset("kunishou/hh-rlhf-eval-3k-ja")

df1.set_format(type="pandas")
df2.set_format(type="pandas")

df1 = df1["train"][:]
df2 = df2["test"][:]
df2["flag"] = 1

# Left-join on "instruction" and drop every row that also appears in the eval set.
df = pd.merge(df1, df2[["instruction", "flag"]], on="instruction", how="left")

df = df[df["flag"] != 1].reset_index(drop=True)

# Drop the helper column before building the Dataset.
df = df.drop(columns=["flag"])

dataset = Dataset.from_pandas(df)

dataset
```

hh-rlhf repository
https://github.com/anthropics/hh-rlhf

Anthropic/hh-rlhf
https://huggingface.co/datasets/Anthropic/hh-rlhf

mosaicml/dolly_hhrlhf
https://huggingface.co/datasets/mosaicml/dolly_hhrlhf
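The same exclusion can also be expressed directly with `datasets.Dataset.filter`, without converting to pandas. This is an alternative sketch, not part of the original instructions; it assumes the split names used above (`train` for the 49k set, `test` for this evaluation set).

```python
from datasets import load_dataset

train = load_dataset("kunishou/hh-rlhf-49k-ja", split="train")
eval_ds = load_dataset("kunishou/hh-rlhf-eval-3k-ja", split="test")

# Collect the evaluation instructions once, then drop them from the training set.
eval_instructions = set(eval_ds["instruction"])
train = train.filter(lambda row: row["instruction"] not in eval_instructions)

print(len(train))  # roughly 49k minus the overlapping 3k examples
```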
kunishou/hh-rlhf-eval-3k-ja
[ "license:mit", "region:us" ]
2023-11-29T15:12:19+00:00
{"license": "mit"}
2023-11-29T15:43:10+00:00
[]
[]
TAGS #license-mit #region-us
This is an evaluation dataset for instruction tuning. It consists of 3,000 examples sampled at random from kunishou/hh-rlhf-49k-ja. By always pointing to this evaluation dataset when instruction-tuning on any dataset, you can compare validation losses side by side across runs. When tuning on the kunishou/hh-rlhf-49k-ja dataset, please use the code below to exclude this evaluation set from the training data. hh-rlhf repository URL Anthropic/hh-rlhf URL mosaicml/dolly_hhrlhf URL
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
12cf4f6ade67ba4c72cf37c15091089baef247f1
# Dataset Card for CA-ZH Parallel Corpus

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Source Data](#source-data)
  - [Data preparation](#data-preparation)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Author](#author)
  - [Contact Information](#contact-information)
  - [Copyright](#copyright)
  - [Licensing information](#licensing-information)
  - [Funding](#funding)

## Dataset Description

### Dataset Summary

The CA-ZH Parallel Corpus is a Catalan-Chinese dataset of **6.833.114** parallel sentences. The dataset was created to support Catalan NLP tasks, e.g., Machine Translation.

### Supported Tasks and Leaderboards

The dataset can be used to train a model for Multilingual Machine Translation. Success on this task is typically measured by achieving a high BLEU score.

### Languages

The texts in the dataset are in Catalan and Chinese.

## Dataset Structure

Two separate txt files are provided, with the sentences sorted in the same order:

- ca-zh_all_2023_10_26.ca: contains 6.833.114 Catalan sentences.

- ca-zh_all_2023_10_26.zh: contains 6.833.114 Chinese sentences.

### Data Splits

The dataset contains a single split: `train`.

## Dataset Creation

### Source Data

The Catalan-Chinese data collected from the web was a combination of the following datasets:

| Dataset | Sentences before cleaning |
|:------------------|---------------:|
| WikiMatrix | 90.643 |
| XLENT | 535.803 |
| GNOME | 78 |
| QED | 3.677 |
| TED2020 v1 | 56.269 |
| OpenSubtitles | 139.300 |
| **Total** | **882.039** |

The 6.658.607 sentence pairs of synthetic parallel data were created from the following Spanish-Chinese datasets:

| Dataset | Sentences before cleaning |
|:------------------|---------------:|
| UNPC | 17.599.223 |
| CCMatrix | 24.051.233 |
| MultiParacrawl | 3.410.087 |
| **Total** | **45.060.543** |

### Data preparation

The Chinese side of every dataset is passed through the [fastlangid](https://github.com/currentslab/fastlangid) language detector, and any sentences not identified as simplified Chinese are discarded.
The datasets are then deduplicated and filtered to remove any sentence pairs with a cosine similarity below 0.75.
This is done using sentence embeddings calculated with [LaBSE](https://huggingface.co/sentence-transformers/LaBSE).
The filtered datasets are then concatenated to form a final corpus of **6.833.114** parallel sentences.

### Personal and Sensitive Information

No anonymisation process was performed.

## Considerations for Using the Data

### Social Impact of Dataset

The purpose of this dataset is to help develop Machine Translation systems for mid-resource languages such as Catalan.

### Discussion of Biases

We are aware that since part of the data comes from unreliable web pages and non-curated texts, some biases may be present in the dataset.
Nonetheless, we have not applied any steps to reduce their impact.

### Other Known Limitations

The dataset contains data of a general domain. 
Applying this dataset to more specific domains, such as the biomedical or legal domain, would be of limited use.

## Additional Information

### Author
Language Technologies Unit (LangTech) at the Barcelona Supercomputing Center.

### Contact information
For further information, please send an email to [email protected].

### Copyright
Copyright Language Technologies Unit at Barcelona Supercomputing Center (2023).

### Licensing information
This work is licensed under an [Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/) license.

### Funding
This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project](https://projecteaina.cat/).
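The similarity filter described in the Data preparation section can be reproduced with the `sentence-transformers` package. The sketch below is illustrative rather than the exact pipeline used here: the file paths reuse the names of the released files only as placeholders, the batch size is arbitrary, and the preceding fastlangid language filtering step is omitted.

```python
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/LaBSE")

# Aligned sentence pairs, one sentence per line in each file (placeholder paths).
with open("ca-zh_all_2023_10_26.ca", encoding="utf-8") as f:
    ca_sents = [line.rstrip("\n") for line in f]
with open("ca-zh_all_2023_10_26.zh", encoding="utf-8") as f:
    zh_sents = [line.rstrip("\n") for line in f]

# With L2-normalised embeddings, the row-wise dot product equals cosine similarity.
emb_ca = model.encode(ca_sents, batch_size=256, normalize_embeddings=True)
emb_zh = model.encode(zh_sents, batch_size=256, normalize_embeddings=True)
sims = np.einsum("ij,ij->i", emb_ca, emb_zh)

kept = [(ca, zh) for ca, zh, s in zip(ca_sents, zh_sents, sims) if s >= 0.75]
print(f"kept {len(kept)} of {len(ca_sents)} pairs")
```

Keeping the threshold at 0.75 mirrors the cut-off used for this corpus; a higher value trades recall for cleaner pairs.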
projecte-aina/CA-ZH_Parallel_Corpus
[ "task_categories:translation", "multilinguality:translation", "size_categories:1M<n<10M", "source_datasets:original", "language:ca", "language:zh", "language:multilingual", "region:us" ]
2023-11-29T15:14:32+00:00
{"language": ["ca", "zh", "multilingual"], "multilinguality": ["translation"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["translation"], "task_ids": [], "pretty_name": "CA-ZH Parallel Corpus"}
2024-01-17T13:52:31+00:00
[]
[ "ca", "zh", "multilingual" ]
TAGS #task_categories-translation #multilinguality-translation #size_categories-1M<n<10M #source_datasets-original #language-Catalan #language-Chinese #language-multilingual #region-us
Dataset Card for CA-ZH Parallel Corpus ====================================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks + Languages * Dataset Structure + Data Splits * Dataset Creation + Source Data + Data preparation + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Author + Contact Information + Copyright + Licensing information + Funding Dataset Description ------------------- ### Dataset Summary The CA-ZH Parallel Corpus is a Catalan-Chinese dataset of 6.833.114 parallel sentences. The dataset was created to support Catalan NLP tasks, e.g., Machine Translation. ### Supported Tasks and Leaderboards The dataset can be used to train a model for Multilingual Machine Translation. Success on this task is typically measured by achieving a high BLEU score. ### Languages The texts in the dataset are in Catalan and Chinese. Dataset Structure ----------------- Two separated txt files are provided with the sentences sorted in the same order: * ca-zh\_all\_2023\_10\_26.ca: contains 6.833.114 Catalan sentences. * ca-zh\_all\_2023\_10\_26.zh: contains 6.833.114 Chinese sentences. ### Data Splits The dataset contains a single split: 'train'. Dataset Creation ---------------- ### Source Data The Catalan-Chinese data collected from the web was a combination of the following datasets: The 6.658.607 sentence pairs of synthetic parallel data were created from the following Spanish-Chinese datasets: ### Data preparation The Chinese side of all datasets are passed through the fastlangid language detector and any sentences which are not identified as simplified Chinese are discarded. The datasets are then also deduplicated and filtered to remove any sentence pairs with a cosine similarity of less than 0.75. This is done using sentence embeddings calculated using LaBSE. The filtered datasets are then concatenated to form a final corpus of 6.833.114 parallel sentences. ### Personal and Sensitive Information No anonymisation process was performed. Considerations for Using the Data --------------------------------- ### Social Impact of Dataset The purpose of this dataset is to help develop Machine Translation tasks for mid-resource languages such as Catalan. ### Discussion of Biases We are aware that since part of the data comes from unreliable web pages and non-curated texts, some biases may be present in the dataset. Nonetheless, we have not applied any steps to reduce their impact. ### Other Known Limitations The dataset contains data of a general domain. Application of this dataset in more specific domains such as biomedical, legal etc. would be of limited use. Additional Information ---------------------- ### Author Language Technologies Unit (LangTech) at the Barcelona Supercomputing Center. ### Contact information For further information, please send an email to langtech@URL. ### Copyright Copyright Language Technologies Unit at Barcelona Supercomputing Center (2023). ### Licensing information This work is licensed under a Attribution-NonCommercial-ShareAlike 4.0 International. ### Funding This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL
[ "### Dataset Summary\n\n\nThe CA-ZH Parallel Corpus is a Catalan-Chinese dataset of 6.833.114 parallel sentences. The dataset was created to support Catalan NLP tasks, e.g.,\nMachine Translation.", "### Supported Tasks and Leaderboards\n\n\nThe dataset can be used to train a model for Multilingual Machine Translation. Success on this task is typically measured by achieving a high BLEU score.", "### Languages\n\n\nThe texts in the dataset are in Catalan and Chinese.\n\n\nDataset Structure\n-----------------\n\n\nTwo separated txt files are provided with the sentences sorted in the same order:\n\n\n* ca-zh\\_all\\_2023\\_10\\_26.ca: contains 6.833.114 Catalan sentences.\n* ca-zh\\_all\\_2023\\_10\\_26.zh: contains 6.833.114 Chinese sentences.", "### Data Splits\n\n\nThe dataset contains a single split: 'train'.\n\n\nDataset Creation\n----------------", "### Source Data\n\n\nThe Catalan-Chinese data collected from the web was a combination of the following datasets:\n\n\n\nThe 6.658.607 sentence pairs of synthetic parallel data were created from the following Spanish-Chinese datasets:", "### Data preparation\n\n\nThe Chinese side of all datasets are passed through the fastlangid language detector and any sentences which are not\nidentified as simplified Chinese are discarded.\nThe datasets are then also deduplicated and filtered to remove any sentence pairs with a cosine similarity of less than 0.75.\nThis is done using sentence embeddings calculated using LaBSE.\nThe filtered datasets are then concatenated to form a final corpus of 6.833.114 parallel sentences.", "### Personal and Sensitive Information\n\n\nNo anonymisation process was performed.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nThe purpose of this dataset is to help develop Machine Translation tasks for mid-resource languages such as Catalan.", "### Discussion of Biases\n\n\nWe are aware that since part of the data comes from unreliable web pages and non-curated texts, some biases may be present in the dataset.\nNonetheless, we have not applied any steps to reduce their impact.", "### Other Known Limitations\n\n\nThe dataset contains data of a general domain. Application of this dataset in more specific domains such as biomedical, legal etc. would be of limited use.\n\n\nAdditional Information\n----------------------", "### Author\n\n\nLanguage Technologies Unit (LangTech) at the Barcelona Supercomputing Center.", "### Contact information\n\n\nFor further information, please send an email to langtech@URL.", "### Copyright\n\n\nCopyright Language Technologies Unit at Barcelona Supercomputing Center (2023).", "### Licensing information\n\n\nThis work is licensed under a Attribution-NonCommercial-ShareAlike 4.0 International.", "### Funding\n\n\nThis work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL" ]
[ "TAGS\n#task_categories-translation #multilinguality-translation #size_categories-1M<n<10M #source_datasets-original #language-Catalan #language-Chinese #language-multilingual #region-us \n", "### Dataset Summary\n\n\nThe CA-ZH Parallel Corpus is a Catalan-Chinese dataset of 6.833.114 parallel sentences. The dataset was created to support Catalan NLP tasks, e.g.,\nMachine Translation.", "### Supported Tasks and Leaderboards\n\n\nThe dataset can be used to train a model for Multilingual Machine Translation. Success on this task is typically measured by achieving a high BLEU score.", "### Languages\n\n\nThe texts in the dataset are in Catalan and Chinese.\n\n\nDataset Structure\n-----------------\n\n\nTwo separated txt files are provided with the sentences sorted in the same order:\n\n\n* ca-zh\\_all\\_2023\\_10\\_26.ca: contains 6.833.114 Catalan sentences.\n* ca-zh\\_all\\_2023\\_10\\_26.zh: contains 6.833.114 Chinese sentences.", "### Data Splits\n\n\nThe dataset contains a single split: 'train'.\n\n\nDataset Creation\n----------------", "### Source Data\n\n\nThe Catalan-Chinese data collected from the web was a combination of the following datasets:\n\n\n\nThe 6.658.607 sentence pairs of synthetic parallel data were created from the following Spanish-Chinese datasets:", "### Data preparation\n\n\nThe Chinese side of all datasets are passed through the fastlangid language detector and any sentences which are not\nidentified as simplified Chinese are discarded.\nThe datasets are then also deduplicated and filtered to remove any sentence pairs with a cosine similarity of less than 0.75.\nThis is done using sentence embeddings calculated using LaBSE.\nThe filtered datasets are then concatenated to form a final corpus of 6.833.114 parallel sentences.", "### Personal and Sensitive Information\n\n\nNo anonymisation process was performed.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nThe purpose of this dataset is to help develop Machine Translation tasks for mid-resource languages such as Catalan.", "### Discussion of Biases\n\n\nWe are aware that since part of the data comes from unreliable web pages and non-curated texts, some biases may be present in the dataset.\nNonetheless, we have not applied any steps to reduce their impact.", "### Other Known Limitations\n\n\nThe dataset contains data of a general domain. Application of this dataset in more specific domains such as biomedical, legal etc. would be of limited use.\n\n\nAdditional Information\n----------------------", "### Author\n\n\nLanguage Technologies Unit (LangTech) at the Barcelona Supercomputing Center.", "### Contact information\n\n\nFor further information, please send an email to langtech@URL.", "### Copyright\n\n\nCopyright Language Technologies Unit at Barcelona Supercomputing Center (2023).", "### Licensing information\n\n\nThis work is licensed under a Attribution-NonCommercial-ShareAlike 4.0 International.", "### Funding\n\n\nThis work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL" ]
[ 58, 50, 45, 103, 25, 52, 115, 26, 32, 61, 50, 21, 18, 18, 24, 27 ]
[ "passage: TAGS\n#task_categories-translation #multilinguality-translation #size_categories-1M<n<10M #source_datasets-original #language-Catalan #language-Chinese #language-multilingual #region-us \n### Dataset Summary\n\n\nThe CA-ZH Parallel Corpus is a Catalan-Chinese dataset of 6.833.114 parallel sentences. The dataset was created to support Catalan NLP tasks, e.g.,\nMachine Translation.### Supported Tasks and Leaderboards\n\n\nThe dataset can be used to train a model for Multilingual Machine Translation. Success on this task is typically measured by achieving a high BLEU score.### Languages\n\n\nThe texts in the dataset are in Catalan and Chinese.\n\n\nDataset Structure\n-----------------\n\n\nTwo separated txt files are provided with the sentences sorted in the same order:\n\n\n* ca-zh\\_all\\_2023\\_10\\_26.ca: contains 6.833.114 Catalan sentences.\n* ca-zh\\_all\\_2023\\_10\\_26.zh: contains 6.833.114 Chinese sentences.### Data Splits\n\n\nThe dataset contains a single split: 'train'.\n\n\nDataset Creation\n----------------### Source Data\n\n\nThe Catalan-Chinese data collected from the web was a combination of the following datasets:\n\n\n\nThe 6.658.607 sentence pairs of synthetic parallel data were created from the following Spanish-Chinese datasets:### Data preparation\n\n\nThe Chinese side of all datasets are passed through the fastlangid language detector and any sentences which are not\nidentified as simplified Chinese are discarded.\nThe datasets are then also deduplicated and filtered to remove any sentence pairs with a cosine similarity of less than 0.75.\nThis is done using sentence embeddings calculated using LaBSE.\nThe filtered datasets are then concatenated to form a final corpus of 6.833.114 parallel sentences.### Personal and Sensitive Information\n\n\nNo anonymisation process was performed.\n\n\nConsiderations for Using the Data\n---------------------------------### Social Impact of Dataset\n\n\nThe purpose of this dataset is to help develop Machine Translation tasks for mid-resource languages such as Catalan." ]
83f6491ed95561f362ce9cbd98d62820ec0888f0
# Dataset Card for end2end_textclassification_with_suggestions_and_responses

This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).

## Dataset Description

- **Homepage:** https://argilla.io
- **Repository:** https://github.com/argilla-io/argilla
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

This dataset contains:

* A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla.

* Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`.

* The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.

### Load with Argilla

To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:

```python
import argilla as rg

ds = rg.FeedbackDataset.from_huggingface("argilla/end2end_textclassification_with_suggestions_and_responses")
```

### Load with `datasets`

To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:

```python
from datasets import load_dataset

ds = load_dataset("argilla/end2end_textclassification_with_suggestions_and_responses")
```

### Supported Tasks and Leaderboards

This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset), so it can be used for different NLP tasks depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure).

There are no leaderboards associated with this dataset.

### Languages

[More Information Needed]

## Dataset Structure

### Data in Argilla

The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.

The **fields** are the dataset records themselves; for the moment only text fields are supported. These are the ones that will be used to provide responses to the questions.

| Field Name | Title | Type | Required | Markdown |
| ---------- | ----- | ---- | -------- | -------- |
| text | Text | FieldTypes.text | True | False |

The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.

| Question Name | Title | Type | Required | Description | Values/Labels |
| ------------- | ----- | ---- | -------- | ----------- | ------------- |
| label | Label | QuestionTypes.label_selection | True | N/A | ['World', 'Sports', 'Business', 'Sci/Tech'] |

The **suggestions** are human- or machine-generated recommendations for each question, meant to assist the annotator during the annotation process. They are always linked to the existing questions and are named by appending "-suggestion" and "-suggestion-metadata" to the question name; these columns contain the value(s) of the suggestion and its metadata, respectively. 
Accordingly, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata".

The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or additional details such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`.

| Metadata Name | Title | Type | Values | Visible for Annotators |
| ------------- | ----- | ---- | ------ | ---------------------- |

The **guidelines** are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section.

### Data Instances

An example of a dataset instance in Argilla looks as follows:

```json
{
    "external_id": "record-0",
    "fields": {
        "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again."
    },
    "metadata": {},
    "responses": [
        {
            "status": "submitted",
            "user_id": null,
            "values": {
                "label": {
                    "value": "Business"
                }
            }
        }
    ],
    "suggestions": [
        {
            "agent": "cointegrated/rubert-tiny-bilingual-nli",
            "question_name": "label",
            "score": null,
            "type": null,
            "value": "Sports"
        }
    ],
    "vectors": {}
}
```

While the same record in HuggingFace `datasets` looks as follows:

```json
{
    "external_id": "record-0",
    "label": [
        {
            "status": "submitted",
            "user_id": null,
            "value": "Business"
        }
    ],
    "label-suggestion": "Sports",
    "label-suggestion-metadata": {
        "agent": "cointegrated/rubert-tiny-bilingual-nli",
        "score": null,
        "type": null
    },
    "metadata": "{}",
    "text": "Wall St. Bears Claw Back Into the Black (Reuters) Reuters - Short-sellers, Wall Street\u0027s dwindling\\band of ultra-cynics, are seeing green again."
}
```

### Data Fields

Among the dataset fields, we differentiate between the following:

* **Fields:** These are the dataset records themselves; for the moment, just text fields are supported. These are the ones that will be used to provide responses to the questions.

    * **text** is of type `FieldTypes.text`.

* **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`.

    * **label** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].

* **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.

    * (optional) **label-suggestion** is of type `QuestionTypes.label_selection` with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].
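As a minimal sketch, assuming the dataset has been loaded with `load_dataset` as shown above and using the column names from the HuggingFace record layout, you can measure how often the suggestion agrees with the submitted responses:

```python
from datasets import load_dataset

ds = load_dataset("argilla/end2end_textclassification_with_suggestions_and_responses", split="train")

agree, total = 0, 0
for record in ds:
    # "label" holds the list of human responses; keep only the submitted ones.
    submitted = [r["value"] for r in record["label"] if r["status"] == "submitted"]
    if not submitted:
        continue
    total += 1
    # "label-suggestion" holds the machine-generated label for the same question.
    agree += record["label-suggestion"] in submitted

print(f"suggestion/response agreement: {agree}/{total}")
```

Records can carry several responses, so the sketch counts agreement with any submitted response as a match.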
Additionally, there are two more optional fields:

* **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or additional details such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`.
* **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.

### Data Splits

The dataset contains a single split, which is `train`.

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation guidelines

Classify the articles into one of the four categories.

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
argilla/end2end_textclassification_with_suggestions_and_responses
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-29T15:15:26+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2024-02-13T00:59:24+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for end2end\_textclassification\_with\_suggestions\_and\_responses =============================================================================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + text is of type 'URL'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + label is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) label-suggestion is of type 'QuestionTypes.label\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech']. Additionally, we also have two more fields that are optional and are the following: * metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines Classify the articles into one of the four categories. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. 
These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ label is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) label-suggestion is of type 'QuestionTypes.label\\_selection' with the following allowed values ['World', 'Sports', 'Business', 'Sci/Tech'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nClassify the articles into one of the four categories.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 404, 40, 508, 27, 7, 4, 10, 10, 5, 17, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
115e902cd3869ad5459c48d2921fbb0a7778adf3
# Dataset Card for "aozora_kids" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ohtaman/aozora_kids
[ "region:us" ]
2023-11-29T15:34:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "filename", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "short_description", "dtype": "string"}, {"name": "char_kana_type", "dtype": "string"}, {"name": "story", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 85891851, "num_examples": 1221}, {"name": "test", "num_bytes": 586251, "num_examples": 8}], "download_size": 42922184, "dataset_size": 86478102}}
2023-11-29T15:35:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "aozora_kids" More Information needed
[ "# Dataset Card for \"aozora_kids\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"aozora_kids\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"aozora_kids\"\n\nMore Information needed" ]
e27304e0e560a6fd6d603bb4786951eef2011ad4
# The Tensor Trust dataset (v1) (mirror of [GitHub version](https://github.com/HumanCompatibleAI/tensor-trust-data)) **Other Tensor Trust links:** [[Game]](https://tensortrust.ai/) [[Code]](https://github.com/HumanCompatibleAI/tensor-trust) [[Paper]](https://arxiv.org/abs/2311.01011) This HF dataset contains the raw data and derived benchmarks for the [Tensor Trust](https://tensortrust.ai/paper) project. An interactive explanation of how to load and use the data (including the meaning of the columns) is in a Jupyter notebook in this directory. You can [click here to run the notebook right now in Google Colab](https://colab.research.google.com/github/HumanCompatibleAI/tensor-trust-data/blob/main/Using%20the%20Tensor%20Trust%20dataset.ipynb).
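The notebook is the authoritative guide to loading and interpreting the data. If you just want the files, the sketch below (a minimal, untested example that makes no assumptions about the file layout) mirrors the repository locally so you can inspect what it ships:

```python
import os

from huggingface_hub import snapshot_download

# Fetch a local mirror of the dataset repository and list its files;
# the linked notebook documents how each file is meant to be used.
local_dir = snapshot_download(repo_id="qxcv/tensor-trust", repo_type="dataset")
for root, _dirs, files in os.walk(local_dir):
    for name in files:
        print(os.path.relpath(os.path.join(root, name), local_dir))
```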
qxcv/tensor-trust
[ "task_categories:text-generation", "size_categories:100K<n<1M", "arxiv:2311.01011", "region:us" ]
2023-11-29T16:08:39+00:00
{"size_categories": ["100K<n<1M"], "task_categories": ["text-generation"]}
2023-11-29T16:15:08+00:00
[ "2311.01011" ]
[]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #arxiv-2311.01011 #region-us
# The Tensor Trust dataset (v1) (mirror of GitHub version) Other Tensor Trust links: [[Game]](URL [[Code]](URL [[Paper]](URL This HF dataset contains the raw data and derived benchmarks for the Tensor Trust project. An interactive explanation of how to load and use the data (including the meaning of the columns) is in a Jupyter notebook in this directory. You can click here to run the notebook right now in Google Colab.
[ "# The Tensor Trust dataset (v1) (mirror of GitHub version)\n\nOther Tensor Trust links: [[Game]](URL [[Code]](URL [[Paper]](URL\n\nThis HF dataset contains the raw data and derived benchmarks for the Tensor Trust project.\nAn interactive explanation of how to load and use the data (including the meaning of the columns) is in a Jupyter notebook in this directory.\nYou can click here to run the notebook right now in Google Colab." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #arxiv-2311.01011 #region-us \n", "# The Tensor Trust dataset (v1) (mirror of GitHub version)\n\nOther Tensor Trust links: [[Game]](URL [[Code]](URL [[Paper]](URL\n\nThis HF dataset contains the raw data and derived benchmarks for the Tensor Trust project.\nAn interactive explanation of how to load and use the data (including the meaning of the columns) is in a Jupyter notebook in this directory.\nYou can click here to run the notebook right now in Google Colab." ]
[ 38, 110 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #arxiv-2311.01011 #region-us \n# The Tensor Trust dataset (v1) (mirror of GitHub version)\n\nOther Tensor Trust links: [[Game]](URL [[Code]](URL [[Paper]](URL\n\nThis HF dataset contains the raw data and derived benchmarks for the Tensor Trust project.\nAn interactive explanation of how to load and use the data (including the meaning of the columns) is in a Jupyter notebook in this directory.\nYou can click here to run the notebook right now in Google Colab." ]
c900297e450ef3cb0d9e1454bb8a9833d3516636
SLTPvA Dataset:
- Alpaca format
- All MICH Angiospermae entries as of 28-11-2023 (v1-0)

Synthetic OCR:
- C25: 25% of cells will be randomly ALL CAPS
- L25: 25% of cells will be randomly all lowercase
- E25: 25% of all rows will be subjected to synthetic OCR augmentation
- R05: 5% chance that a given character in an OCR augmentation row will undergo substitution, deletion, or insertion errors
- Synthetic OCR augmentation rows also have random strings inserted sporadically to simulate OCR noise

System message:

Refactor the unstructured text into a valid JSON dictionary. The key names follow the Darwin Core Archive Standard. If a key lacks content, then insert an empty string. Fill in the following JSON structure as required: {\"catalogNumber\": \"\", \"order\": \"\", \"family\": \"\", \"scientificName\": \"\", \"scientificNameAuthorship\": \"\", \"genus\": \"\", \"subgenus\": \"\", \"specificEpithet\": \"\", \"verbatimTaxonRank\": \"\", \"infraspecificEpithet\": \"\", \"identifiedBy\": \"\", \"recordedBy\": \"\", \"recordNumber\": \"\", \"verbatimEventDate\": \"\", \"habitat\": \"\", \"occurrenceRemarks\": \"\", \"associatedTaxa\": \"\", \"country\": \"\", \"stateProvince\": \"\", \"county\": \"\", \"municipality\": \"\", \"locality\": \"\", \"decimalLatitude\": \"\", \"decimalLongitude\": \"\", \"verbatimCoordinates\": \"\", \"minimumElevationInMeters\": \"\", \"maximumElevationInMeters\": \"\"}

JSON format:

{
"catalogNumber": "",
"order": "",
"family": "",
"scientificName": "",
"scientificNameAuthorship": "",
"genus": "",
"subgenus": "",
"specificEpithet": "",
"verbatimTaxonRank": "",
"infraspecificEpithet": "",
"identifiedBy": "",
"recordedBy": "",
"recordNumber": "",
"verbatimEventDate": "",
"habitat": "",
"occurrenceRemarks": "",
"associatedTaxa": "",
"country": "",
"stateProvince": "",
"county": "",
"municipality": "",
"locality": "",
"decimalLatitude": "",
"decimalLongitude": "",
"verbatimCoordinates": "",
"minimumElevationInMeters": "",
"maximumElevationInMeters": ""
}
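For clarity, here is an illustrative reconstruction of that augmentation in Python. It is a sketch of the C25/L25/E25/R05 parameters as described above, not the authors' actual pipeline; the function names and the noise alphabet are assumptions:

```python
import random
import string

# Probabilities corresponding to C25 / L25 / E25 / R05 above.
P_CAPS, P_LOWER, P_AUG_ROW, P_CHAR_ERR = 0.25, 0.25, 0.25, 0.05


def noisy_case(cell: str) -> str:
    """Randomly force a cell to ALL CAPS (25%) or all lowercase (25%)."""
    r = random.random()
    if r < P_CAPS:
        return cell.upper()
    if r < P_CAPS + P_LOWER:
        return cell.lower()
    return cell


def ocr_corrupt(text: str) -> str:
    """Give each character a 5% chance of a substitution, deletion, or insertion error."""
    out = []
    for ch in text:
        if random.random() < P_CHAR_ERR:
            kind = random.choice(("substitute", "delete", "insert"))
            if kind == "substitute":
                out.append(random.choice(string.ascii_letters + string.digits))
            elif kind == "insert":
                out.append(ch + random.choice(string.ascii_letters))
            # "delete": drop the character entirely
        else:
            out.append(ch)
    return "".join(out)


def augment_row(cells: list) -> list:
    """Apply casing noise to every cell, then full OCR noise to 25% of rows."""
    cells = [noisy_case(c) for c in cells]
    if random.random() < P_AUG_ROW:
        cells = [ocr_corrupt(c) for c in cells]
        # Sporadic random strings to simulate stray OCR noise.
        noise = "".join(random.choices(string.ascii_letters + string.punctuation,
                                       k=random.randint(2, 8)))
        cells.insert(random.randrange(len(cells) + 1), noise)
    return cells
```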
phyloforfun/HLT_MICH_Angiospermae_SLTPvA_v1-0__OCR-C25-L25-E25-R05
[ "license:cc-by-sa-4.0", "region:us" ]
2023-11-29T16:23:53+00:00
{"license": "cc-by-sa-4.0", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 16878481512, "num_examples": 10134076}], "download_size": 1579045698, "dataset_size": 16878481512}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-29T21:40:32+00:00
[]
[]
TAGS #license-cc-by-sa-4.0 #region-us
SLTPvA Dataset: - Alpaca format - All MICH Angiospermae entries as of 28-11-2023 (v1-0) Synthetic OCR: - C25 25% of cells will be randomly ALL CAPS - L25 25% of cells will be randomly all lowercase - E25 25% of all rows will be subjected to synthetic OCR augmentation - R05 5% chance that a given character in an OCR augmentation row will undergo substitution, deletion, insertion errors - Synthetic OCR augmentation rows also have random strings inserted sporadically to simulate OCR noise System message: Refactor the unstructured text into a valid JSON dictionary. The key names follow the Darwin Core Archive Standard. If a key lacks content, then insert an empty string. Fill in the following JSON structure as required: {\"catalogNumber\": \"\", \"order\": \"\", \"family\": \"\", \"scientificName\": \"\", \"scientificNameAuthorship\": \"\", \"genus\": \"\", \"subgenus\": \"\", \"specificEpithet\": \"\", \"verbatimTaxonRank\": \"\", \"infraspecificEpithet\": \"\", \"identifiedBy\": \"\", \"recordedBy\": \"\", \"recordNumber\": \"\", \"verbatimEventDate\": \"\", \"habitat\": \"\", \"occurrenceRemarks\": \"\", \"associatedTaxa\": \"\", \"country\": \"\", \"stateProvince\": \"\", \"county\": \"\", \"municipality\": \"\", \"locality\": \"\", \"decimalLatitude\": \"\", \"decimalLongitude\": \"\", \"verbatimCoordinates\": \"\", \"minimumElevationInMeters\": \"\", \"maximumElevationInMeters\": \"\"} JSON format: { "catalogNumber": "", "order": "", "family": "", "scientificName": "", "scientificNameAuthorship": "", "genus": "", "subgenus": "", "specificEpithet": "", "verbatimTaxonRank": "", "infraspecificEpithet": "", "identifiedBy": "", "recordedBy": "", "recordNumber": "", "verbatimEventDate": "", "habitat": "", "occurrenceRemarks": "", "associatedTaxa": "", "country": "", "stateProvince": "", "county": "", "municipality": "", "locality": "", "decimalLatitude": "", "decimalLongitude": "", "verbatimCoordinates": "", "minimumElevationInMeters": "", "maximumElevationInMeters": "" }
[]
[ "TAGS\n#license-cc-by-sa-4.0 #region-us \n" ]
[ 17 ]
[ "passage: TAGS\n#license-cc-by-sa-4.0 #region-us \n" ]