| Column | Type | Min length | Max length |
|:----------------|:-------|-----------:|-----------:|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | list | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | list | 0 | 25 |
| languages | list | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | list | 0 | 352 |
| processed_texts | list | 1 | 353 |
| tokens_length | list | 1 | 353 |
| input_texts | list | 1 | 40 |
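The schema above describes one record per dataset card: string columns are measured in characters, list columns in elements. As a minimal sketch of what those min/max figures mean (the `records` iterable is an assumption; this dump does not name a source repo, so nothing is loaded from a specific dataset):

```python
# Sketch: recompute per-column min/max lengths for records shaped like the rows below.
# `records` (an iterable of dicts with these fields) is assumed, not loaded from anywhere.
STRING_COLS = ["sha", "text", "id", "created_at", "metadata",
               "last_modified", "tags_str", "text_str"]
LIST_COLS = ["tags", "arxiv", "languages", "text_lists",
             "processed_texts", "tokens_length", "input_texts"]

def column_stats(records):
    stats = {}
    for row in records:
        for col in STRING_COLS + LIST_COLS:
            n = len(row[col])  # characters for strings, elements for lists
            lo, hi = stats.get(col, (n, n))
            stats[col] = (min(lo, n), max(hi, n))
    return stats

# Toy usage with a single placeholder record:
toy = {c: "xyz" for c in STRING_COLS}
toy.update({c: [1, 2] for c in LIST_COLS})
print(column_stats([toy]))
```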
883b23b3120ec0eec3c53562ee6646cc6f420c5c
# Dataset card for Hiking ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset description](#dataset-description) - [Dataset categories](#dataset-categories) ## Dataset description - **Homepage:** https://segments.ai/twdent/Hiking This dataset was created using [Segments.ai](https://segments.ai). It can be found [here](https://segments.ai/twdent/Hiking). ## Dataset categories | Id | Name | Description | | --- | ---- | ----------- | | 1 | traversable | - | | 2 | non-traversable | - |
twdent/Hiking
[ "task_categories:image-segmentation", "region:us" ]
2023-10-05T12:45:36+00:00
{"task_categories": ["image-segmentation"], "dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 316794997.0, "num_examples": 38}], "download_size": 0, "dataset_size": 316794997.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T17:15:24+00:00
[]
[]
TAGS #task_categories-image-segmentation #region-us
Dataset card for Hiking ======================= Table of Contents ----------------- * Table of Contents * Dataset description * Dataset categories Dataset description ------------------- * Homepage: URL This dataset was created using URL. It can be found here. Dataset categories ------------------ Id: 1, Name: traversable, Description: - Id: 2, Name: non-traversable, Description: -
[]
[ "TAGS\n#task_categories-image-segmentation #region-us \n" ]
[ 18 ]
[ "passage: TAGS\n#task_categories-image-segmentation #region-us \n" ]
f9da7e8bb9f025644db2dee54b004b9d546951d9
# Dataset Card for "agree_disagree" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
daspartho/agree_disagree
[ "region:us" ]
2023-10-05T12:46:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "statement", "dtype": "string"}, {"name": "reply", "dtype": "string"}, {"name": "sentiment", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 267030, "num_examples": 1660}], "download_size": 113328, "dataset_size": 267030}}
2023-10-05T12:46:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "agree_disagree" More Information needed
[ "# Dataset Card for \"agree_disagree\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"agree_disagree\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"agree_disagree\"\n\nMore Information needed" ]
c10b5da36dea7b17df480566fc0ed2c2911f6663
# Dataset Card for "processed_Cosmic_dataset_V3_inst_splitted" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
slaqrichi/processed_Cosmic_dataset_V3_inst_splitted
[ "region:us" ]
2023-10-05T13:01:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 77676.57894736843, "num_examples": 85}, {"name": "test", "num_bytes": 4569.210526315789, "num_examples": 5}, {"name": "valid", "num_bytes": 4569.210526315789, "num_examples": 5}], "download_size": 42971, "dataset_size": 86815.0}}
2023-10-05T13:01:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "processed_Cosmic_dataset_V3_inst_splitted" More Information needed
[ "# Dataset Card for \"processed_Cosmic_dataset_V3_inst_splitted\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"processed_Cosmic_dataset_V3_inst_splitted\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"processed_Cosmic_dataset_V3_inst_splitted\"\n\nMore Information needed" ]
299eb7016b7366935bf3a4296a530ff76b9e9c0e
# Dataset Card for "Med_QA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
HuggingSara/medqa
[ "region:us" ]
2023-10-05T13:10:17+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "options", "struct": [{"name": "A", "dtype": "string"}, {"name": "B", "dtype": "string"}, {"name": "C", "dtype": "string"}, {"name": "D", "dtype": "string"}, {"name": "E", "dtype": "string"}]}, {"name": "meta_info", "dtype": "string"}, {"name": "answer_idx", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9470204, "num_examples": 10178}, {"name": "validation", "num_bytes": 1184039, "num_examples": 1272}, {"name": "test", "num_bytes": 1211382, "num_examples": 1273}], "download_size": 6952745, "dataset_size": 11865625}}
2023-10-05T13:12:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Med_QA" More Information needed
[ "# Dataset Card for \"Med_QA\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Med_QA\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Med_QA\"\n\nMore Information needed" ]
76374e9cdd1a3d37d0596780006bd3f90475583e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/pegasus-large * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sasha](https://huggingface.co/sasha) for evaluating this model.
autoevaluate/autoeval-eval-cnn_dailymail-3.0.0-36c277-93197145789
[ "autotrain", "evaluation", "region:us" ]
2023-10-05T13:20:13+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["cnn_dailymail"], "eval_info": {"task": "summarization", "model": "google/pegasus-large", "metrics": [], "dataset_name": "cnn_dailymail", "dataset_config": "3.0.0", "dataset_split": "test", "col_mapping": {"text": "article", "target": "highlights"}}}
2023-10-05T16:41:31+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/pegasus-large * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @sasha for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/pegasus-large\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/pegasus-large\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ 13, 87, 15 ]
[ "passage: TAGS\n#autotrain #evaluation #region-us \n# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/pegasus-large\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.## Contributions\n\nThanks to @sasha for evaluating this model." ]
805988e451a5b493a8bad65aaf05edbfd9a53416
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sasha](https://huggingface.co/sasha) for evaluating this model.
autoevaluate/autoeval-eval-cnn_dailymail-3.0.0-36c277-93197145790
[ "autotrain", "evaluation", "region:us" ]
2023-10-05T13:20:19+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["cnn_dailymail"], "eval_info": {"task": "summarization", "model": "ainize/bart-base-cnn", "metrics": [], "dataset_name": "cnn_dailymail", "dataset_config": "3.0.0", "dataset_split": "test", "col_mapping": {"text": "article", "target": "highlights"}}}
2023-10-05T13:29:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @sasha for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ 13, 88, 15 ]
[ "passage: TAGS\n#autotrain #evaluation #region-us \n# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.## Contributions\n\nThanks to @sasha for evaluating this model." ]
f1eae58aab8f968f18cba39a58fc20052d25c174
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sasha](https://huggingface.co/sasha) for evaluating this model.
autoevaluate/autoeval-eval-xsum-default-0288cc-93201145793
[ "autotrain", "evaluation", "region:us" ]
2023-10-05T13:49:20+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["xsum"], "eval_info": {"task": "summarization", "model": "ainize/bart-base-cnn", "metrics": [], "dataset_name": "xsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "document", "target": "summary"}}}
2023-10-05T13:57:36+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @sasha for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: xsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: xsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ 13, 84, 15 ]
[ "passage: TAGS\n#autotrain #evaluation #region-us \n# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: xsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.## Contributions\n\nThanks to @sasha for evaluating this model." ]
6de6eb86eb342737bc278c24156477b4c3538163
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sasha](https://huggingface.co/sasha) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-ccdc20-93203145794
[ "autotrain", "evaluation", "region:us" ]
2023-10-05T13:49:24+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "ainize/bart-base-cnn", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2023-10-05T13:50:08+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ainize/bart-base-cnn * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @sasha for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sasha for evaluating this model." ]
[ 13, 85, 15 ]
[ "passage: TAGS\n#autotrain #evaluation #region-us \n# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ainize/bart-base-cnn\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.## Contributions\n\nThanks to @sasha for evaluating this model." ]
4a14cec299d4ded3959d897e456e5e73d5735507
# Dataset Card for "test3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xivin/test3
[ "region:us" ]
2023-10-05T14:04:57+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28000, "num_examples": 1000}], "download_size": 2170, "dataset_size": 28000}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T15:14:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test3" More Information needed
[ "# Dataset Card for \"test3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test3\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test3\"\n\nMore Information needed" ]
54dc764e7cee361463ad021076562dad262cb05e
# Bangumi Image Base of Kaifuku Jutsushi No Yarinaoshi This is the image base of bangumi Kaifuku Jutsushi no Yarinaoshi, we detected 27 characters, 1301 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 63 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 8 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 77 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 12 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 185 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 39 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 48 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 7 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | N/A | | 8 | 143 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 208 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 25 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 8 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 7 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | N/A | | 13 | 22 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 15 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 36 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 20 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 23 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 17 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 71 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 7 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | 
![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | N/A | | 21 | 6 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | N/A | N/A | | 22 | 7 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | N/A | | 23 | 24 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 7 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | N/A | | 25 | 5 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | N/A | N/A | N/A | | noise | 211 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/kaifukujutsushinoyarinaoshi
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T14:31:09+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T15:33:19+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Kaifuku Jutsushi No Yarinaoshi ==================================================== This is the image base of bangumi Kaifuku Jutsushi no Yarinaoshi, we detected 27 characters, 1301 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
e0dcd5e07552fce483f963f77f95c5e825d0b3dc
This dataset was obtained by automatically translating the dolly 15k dataset (https://huggingface.co/datasets/databricks/databricks-dolly-15k) into Italian using an open-source machine translation tool: https://pypi.org/project/argostranslate/
basilepp19/dolly-15k-it
[ "license:cc-by-nc-sa-3.0", "region:us" ]
2023-10-05T14:44:12+00:00
{"license": "cc-by-nc-sa-3.0"}
2023-10-05T14:48:15+00:00
[]
[]
TAGS #license-cc-by-nc-sa-3.0 #region-us
This dataset was obtained by automatically translating the dolly 15k dataset (URL) into Italian using an open-source machine translation tool: URL
[]
[ "TAGS\n#license-cc-by-nc-sa-3.0 #region-us \n" ]
[ 19 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-3.0 #region-us \n" ]
ceaef456596d915889c7b42cafc8d43b6ea996a4
# Dataset Card for "github-issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
loganathanspr/github-issues
[ "region:us" ]
2023-10-05T14:50:51+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "number", "dtype": "int64"}, {"name": "html_url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "int64"}, {"name": "updated_at", "dtype": "int64"}, {"name": "closed_at", "dtype": "int64"}, {"name": "draft", "dtype": "float64"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "body", "dtype": "string"}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 6283690, "num_examples": 500}], "download_size": 1586983, "dataset_size": 6283690}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T14:50:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "github-issues" More Information needed
[ "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
4640052d6c87e9255fac38bc6694ca335a892da0
This dataset is obtained by transforming the training and test data of the two EVALITA tasks into an LLM prompt following a template. The involved tasks are: AMI2020 (misogyny detection) and HASPEEDE-v2-2020 (hate-speech detection). For the AMI task, we used the following template: *instruction: Nel testo seguente si esprime odio contro le donne? Rispondi sì o no., input: \<text\>, output: \<sì/no\>.* Similarly, for HASPEEDE we used: *instruction: “Il testo seguente incita all’odio? Rispondi sì o no., input: \<text\>, output: \<sì/no\>.* To fill these templates, we mapped the label "1" with the word "sì" and the label "0" with the word "no", \<text\> is just the sentence from the dataset to classify.
basilepp19/evalita2020-AH-instr
[ "license:cc-by-nd-4.0", "region:us" ]
2023-10-05T14:55:07+00:00
{"license": "cc-by-nd-4.0"}
2023-10-05T14:59:14+00:00
[]
[]
TAGS #license-cc-by-nd-4.0 #region-us
This dataset is obtained by transforming the training and test data of the two EVALITA tasks into an LLM prompt following a template. The involved tasks are: AMI2020 (misogyny detection) and HASPEEDE-v2-2020 (hate-speech detection). For the AMI task, we used the following template: *instruction: Nel testo seguente si esprime odio contro le donne? Rispondi sì o no., input: \<text\>, output: \<sì/no\>.* Similarly, for HASPEEDE we used: *instruction: “Il testo seguente incita all’odio? Rispondi sì o no., input: \<text\>, output: \<sì/no\>.* To fill these templates, we mapped the label "1" with the word "sì" and the label "0" with the word "no", \<text\> is just the sentence from the dataset to classify.
[]
[ "TAGS\n#license-cc-by-nd-4.0 #region-us \n" ]
[ 17 ]
[ "passage: TAGS\n#license-cc-by-nd-4.0 #region-us \n" ]
55c797ff2ad5d083176e6b734d3edae63b337a01
# Dataset Card for "lit_test" Collection of questions, and answers about top novels from history as well as many direct excerpts from novels and textbooks. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cvinker/lit_test
[ "region:us" ]
2023-10-05T14:56:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "QUESTION", "dtype": "string"}, {"name": "ANSWER", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 15413372, "num_examples": 14051}], "download_size": 5719059, "dataset_size": 15413372}}
2023-10-10T12:19:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lit_test" Collection of questions, and answers about top novels from history as well as many direct excerpts from novels and textbooks. More Information needed
[ "# Dataset Card for \"lit_test\"\n\nCollection of questions, and answers about top novels from history as well as many direct excerpts from novels and textbooks.\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lit_test\"\n\nCollection of questions, and answers about top novels from history as well as many direct excerpts from novels and textbooks.\n\nMore Information needed" ]
[ 6, 42 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lit_test\"\n\nCollection of questions, and answers about top novels from history as well as many direct excerpts from novels and textbooks.\n\nMore Information needed" ]
a343749c67df1f4bef45d7134b4adb5bbd027f96
# Dataset Card for "RunMetrics" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Fraol/RunMetrics
[ "region:us" ]
2023-10-05T14:59:22+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "path_name", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "ref_type", "dtype": "string"}, {"name": "ref_status", "dtype": "string"}, {"name": "hash", "dtype": "string"}, {"name": "class_name", "dtype": "string"}, {"name": "method_name", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2296248627, "num_examples": 385811}], "download_size": 480698181, "dataset_size": 2296248627}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T14:59:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "RunMetrics" More Information needed
[ "# Dataset Card for \"RunMetrics\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"RunMetrics\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"RunMetrics\"\n\nMore Information needed" ]
8024ac7dc41b9ee24b62da0bdab21fa22884d8a5
# Bangumi Image Base of Overlord This is the image base of bangumi OVERLORD, we detected 65 characters, 4389 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 76 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 27 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 354 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 117 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 48 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 89 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 32 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 60 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 64 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 17 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 45 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 38 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 84 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 152 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 132 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 62 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 42 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 174 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 26 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 28 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 163 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 
4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 112 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 55 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 53 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 17 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 29 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 42 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 16 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 32 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 72 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 14 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 49 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 51 | [Download](32/dataset.zip) | 
![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 6 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | N/A | N/A | | 34 | 70 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 11 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 113 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 12 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 8 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 11 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 167 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 13 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 64 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 53 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 
8](43/preview_8.png) | | 44 | 6 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | N/A | N/A | | 45 | 183 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 48 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 278 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 157 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 16 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 41 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 117 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 8 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 9 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 27 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 27 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 
6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 181 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 13 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 9 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 31 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 35 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 43 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 51 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 24 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | noise | 185 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/overlord
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T15:07:12+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T18:02:49+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Overlord ============================== This is the image base of bangumi OVERLORD, we detected 65 characters, 4389 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
e31df1eac61d7c1ff4f1662a3dbd64124b50a7c6
# Bangumi Image Base of Haiyore! Nyaruko-san This is the image base of bangumi Haiyore! Nyaruko-san, we detected 31 characters, 3214 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 271 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 848 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 50 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 28 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 63 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 120 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 17 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 443 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 20 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) 
| ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 19 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 784 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 23 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 27 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 11 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 12 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 99 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 9 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 55 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 12 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 7 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | N/A | | 20 | 34 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) 
| ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 23 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 13 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 15 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 8 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 10 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 7 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | N/A | | 27 | 8 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 10 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 16 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | noise | 152 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
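Since this card only recommends "necessary preprocessing" without showing what that might look like, here is a minimal, hedged sketch of fetching one character archive from this image base and discarding unreadable files before a manual noise review. The repo id and archive names (`all.zip`, `<character>/dataset.zip`) come from this card; the integrity check is purely illustrative and is not the maintainers' own cleaning procedure.

```python
# Minimal sketch: download one character archive from the image base and run a
# quick integrity pass before manually reviewing the remaining ~1% noise.
# Assumes the huggingface_hub and Pillow packages are installed and that the
# archives are laid out as in the table above (e.g. "1/dataset.zip").
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download
from PIL import Image

REPO_ID = "BangumiBase/haiyorenyarukosan"  # this image base's dataset repo

# Character #1 has 848 images according to the table above.
archive = hf_hub_download(repo_id=REPO_ID, filename="1/dataset.zip", repo_type="dataset")

out_dir = Path("nyaruko_character_1")
with zipfile.ZipFile(archive) as zf:
    zf.extractall(out_dir)

# Drop files that cannot be decoded at all; genuinely mislabeled images still
# need a manual pass, as the card recommends.
for img_path in sorted(p for p in out_dir.rglob("*") if p.is_file()):
    try:
        with Image.open(img_path) as im:
            im.verify()
    except Exception:
        print(f"removing unreadable file: {img_path}")
        img_path.unlink()
```

The same pattern applies to the other image bases in this collection (Spice and Wolf, Last Exile, Shakugan no Shana, High School Fleet); only the repo id changes.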
BangumiBase/haiyorenyarukosan
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T15:08:02+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T16:50:57+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Haiyore! Nyaruko-san ========================================== This is the image base of bangumi Haiyore! Nyaruko-san; we detected 31 characters and 3214 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (roughly 1% of the images). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
4f34d1c3eb04ee046f98469cc073a20b57b140e1
# Dataset Card for "UIEB" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hikari0608/UIEB
[ "region:us" ]
2023-10-05T15:17:44+00:00
{"dataset_info": {"features": [{"name": "raw", "dtype": "image"}, {"name": "gt", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1351356308.0, "num_examples": 800}, {"name": "val", "num_bytes": 136425185.0, "num_examples": 90}], "download_size": 1487875235, "dataset_size": 1487781493.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}]}]}
2023-10-05T15:34:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "UIEB" More Information needed
[ "# Dataset Card for \"UIEB\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"UIEB\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"UIEB\"\n\nMore Information needed" ]
f9fe13e69bdeefea70d2bac01f4fb8ea6cbb2b27
# Dataset Card for "Eval" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hikari0608/Eval
[ "region:us" ]
2023-10-05T15:17:55+00:00
{"dataset_info": {"features": [{"name": "raw", "dtype": "image"}], "splits": [{"name": "UIEB", "num_bytes": 31221694.0, "num_examples": 60}], "download_size": 31218498, "dataset_size": 31221694.0}, "configs": [{"config_name": "default", "data_files": [{"split": "UIEB", "path": "data/UIEB-*"}]}]}
2023-10-05T15:23:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Eval" More Information needed
[ "# Dataset Card for \"Eval\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Eval\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Eval\"\n\nMore Information needed" ]
564addc122ffda1b93a62ad2b64e314da8a9807c
# Bangumi Image Base of Spice And Wolf This is the image base of bangumi Spice and Wolf, we detected 21 characters, 2749 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 155 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 176 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 61 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 66 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 25 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 31 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 18 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 948 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 93 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 64 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 778 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 43 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 73 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 36 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 24 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 22 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 23 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 16 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 14 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 20 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | noise | 63 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 
4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/spiceandwolf
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T15:27:36+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T17:08:16+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Spice And Wolf ==================================== This is the image base of bangumi Spice and Wolf; we detected 21 characters and 2749 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (roughly 1% of the images). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
0e77188de85e93b5301bae77b68dc2a952a70d94
# Bangumi Image Base of Last Exile This is the image base of bangumi LAST EXILE, we detected 29 characters, 2019 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 74 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 95 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 73 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 36 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 158 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 46 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 74 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 75 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 39 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 53 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 65 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 312 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 47 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 162 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 53 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 43 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 206 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 20 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 73 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 39 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 10 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 
4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 104 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 10 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 38 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 8 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 10 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 9 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 16 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | noise | 71 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/lastexile
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T15:28:02+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T16:49:29+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Last Exile ================================ This is the image base of bangumi LAST EXILE; we detected 29 characters and 2019 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (roughly 1% of the images). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
171fe18e2bd5cb51e27ded3494f46b4108ab13e6
# Bangumi Image Base of Shakugan No Shana This is the image base of bangumi Shakugan no Shana, we detected 66 characters, 8549 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 744 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 928 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 91 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 41 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 39 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 170 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 1431 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 128 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 222 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 263 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 133 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 84 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 152 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 15 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 43 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 71 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 32 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 36 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 32 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 44 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 289 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 59 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 441 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 22 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 46 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 620 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 130 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 41 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 45 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 17 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 31 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 111 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 100 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 25 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 43 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 89 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 12 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 17 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 54 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 118 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 42 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 390 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 26 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 126 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | 
![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 34 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 14 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 17 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 61 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 18 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 22 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 20 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 22 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 15 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 11 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 41 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 43 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 
2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 11 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 29 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 58 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 13 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 27 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 18 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 12 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 13 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 13 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | noise | 444 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/shakugannoshana
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T15:28:52+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T20:01:17+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Shakugan No Shana ======================================= This is the image base of bangumi Shakugan no Shana; we detected 66 characters and 8549 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (roughly 1% of the images). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
bd3b3f769e2ab7aa2bdd1c91f5991640a2ff9684
# Dataset Card for "budget-seq2seq-json" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
napatswift/budget-seq2seq-json
[ "region:us" ]
2023-10-05T15:33:07+00:00
{"dataset_info": {"features": [{"name": "line_item", "sequence": "string"}, {"name": "target", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "format", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 231359400.0, "num_examples": 19075}], "download_size": 47272901, "dataset_size": 231359400.0}}
2023-10-05T15:34:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "budget-seq2seq-json" More Information needed
[ "# Dataset Card for \"budget-seq2seq-json\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"budget-seq2seq-json\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"budget-seq2seq-json\"\n\nMore Information needed" ]
634b092ba85a2294c3c60dc404f942ad3bc3564a
# Dataset Card for "speech_commands-ast-finetuned-results" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
renumics/speech_commands-ast-finetuned-results
[ "region:us" ]
2023-10-05T15:46:44+00:00
{"dataset_info": {"config_name": "v0.01", "features": [{"name": "probability", "dtype": "float64"}, {"name": "prediction", "dtype": {"class_label": {"names": {"0": "yes", "1": "no", "2": "up", "3": "down", "4": "left", "5": "right", "6": "on", "7": "off", "8": "stop", "9": "go", "10": "zero", "11": "one", "12": "two", "13": "three", "14": "four", "15": "five", "16": "six", "17": "seven", "18": "eight", "19": "nine", "20": "bed", "21": "bird", "22": "cat", "23": "dog", "24": "happy", "25": "house", "26": "marvin", "27": "sheila", "28": "tree", "29": "wow", "30": "_silence_"}}}}, {"name": "embedding", "sequence": "float32"}, {"name": "entropy", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1839348, "num_examples": 51093}, {"name": "validation", "num_bytes": 244764, "num_examples": 6799}, {"name": "test", "num_bytes": 110916, "num_examples": 3081}], "download_size": 0, "dataset_size": 2195028}, "configs": [{"config_name": "v0.01", "data_files": [{"split": "train", "path": "v0.01/train-*"}, {"split": "validation", "path": "v0.01/validation-*"}, {"split": "test", "path": "v0.01/test-*"}]}]}
2023-10-09T08:18:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "speech_commands-ast-finetuned-results" More Information needed
[ "# Dataset Card for \"speech_commands-ast-finetuned-results\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"speech_commands-ast-finetuned-results\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"speech_commands-ast-finetuned-results\"\n\nMore Information needed" ]
380d1970d1732b1cbd08467532bf5c2ed67a9fbc
# Dataset Card for "subject-gen-no-shuffle" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chats-bug/subject-gen-no-shuffle
[ "region:us" ]
2023-10-05T15:50:09+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "subject_line", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33316503, "num_examples": 59489}, {"name": "test", "num_bytes": 1699814, "num_examples": 3132}], "download_size": 5459208, "dataset_size": 35016317}}
2023-10-05T15:51:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "subject-gen-no-shuffle" More Information needed
[ "# Dataset Card for \"subject-gen-no-shuffle\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"subject-gen-no-shuffle\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"subject-gen-no-shuffle\"\n\nMore Information needed" ]
32580d5aca902e7eb7f926d79d5c4f9be9e905b2
# Dataset Card for "arabic-english-translation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ninja/arabic-english-translation
[ "region:us" ]
2023-10-05T16:07:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "arabic", "dtype": "string"}, {"name": "english", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 228876.54205607477, "num_examples": 674}, {"name": "test", "num_bytes": 25468.457943925234, "num_examples": 75}], "download_size": 159571, "dataset_size": 254345.0}}
2023-10-05T16:07:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arabic-english-translation" More Information needed
[ "# Dataset Card for \"arabic-english-translation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arabic-english-translation\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arabic-english-translation\"\n\nMore Information needed" ]
99ceeb82bed4b59fe9412245cf450da992f5b198
The embeddings for `text-embedding-ada-002` are stored in this repo.
nlpkevinl/wikipedia_openai_embeddings
[ "region:us" ]
2023-10-05T16:27:50+00:00
{}
2023-10-06T16:40:07+00:00
[]
[]
TAGS #region-us
The embeddings for 'text-embedding-ada-002' are stored in this repo.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
0b9618ab4897daf3d125a51b7ee2c35b51ec222b
# Bangumi Image Base of High School Fleet This is the image base of bangumi High School Fleet, we detected 52 characters, 3269 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 10 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 95 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 44 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 16 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 49 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 77 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 32 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 26 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 63 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 22 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 439 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 32 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 118 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 14 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 38 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 44 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 31 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 58 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 234 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 32 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 53 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 80 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 67 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 312 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 64 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 37 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 83 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 12 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 47 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 27 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 58 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 196 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 17 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 31 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 55 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 38 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 24 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 43 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 51 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 44 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 12 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 10 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 35 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 8 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | 
![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 24 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 7 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | N/A | | 46 | 5 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | N/A | N/A | N/A | | 47 | 18 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 22 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 13 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 7 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | N/A | | noise | 295 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
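The archives linked in the table above are plain zip files stored in the dataset repository, so they can be fetched programmatically before doing the recommended noise cleanup. The snippet below is a minimal sketch, assuming the repository id shown in this record (`BangumiBase/highschoolfleet`) and the file layout implied by the download links (`all.zip` at the root, `<cluster>/dataset.zip` per character); the helper function is our own illustration, not part of the dataset.

```python
# Minimal sketch: download and unpack one character archive with huggingface_hub.
# Assumes the layout implied by the table above; adjust the filename as needed.
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "BangumiBase/highschoolfleet"  # repository id from this record


def fetch_and_extract(filename: str, out_dir: str) -> Path:
    """Download one zip from the dataset repo and extract it locally."""
    archive = hf_hub_download(repo_id=REPO_ID, filename=filename, repo_type="dataset")
    target = Path(out_dir)
    target.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(target)
    return target


if __name__ == "__main__":
    # Character cluster 10 (439 images in the table above); use "all.zip" for the full set.
    fetch_and_extract("10/dataset.zip", "highschoolfleet/cluster_10")
```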
BangumiBase/highschoolfleet
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T17:00:58+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T18:38:22+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of High School Fleet ======================================= This is the image base of the bangumi High School Fleet; we detected 52 characters and 3269 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
65b1f4d20eeb98c6a3716e7a653892dc8335b607
# Dataset Card for "lucidrains-python-3-8192-mistral-7b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kye/lucidrains-python-3-8192-mistral-7b
[ "region:us" ]
2023-10-05T17:10:27+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 167436216, "num_examples": 4087}], "download_size": 38766555, "dataset_size": 167436216}}
2023-10-05T17:10:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lucidrains-python-3-8192-mistral-7b" More Information needed
[ "# Dataset Card for \"lucidrains-python-3-8192-mistral-7b\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lucidrains-python-3-8192-mistral-7b\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lucidrains-python-3-8192-mistral-7b\"\n\nMore Information needed" ]
557fa00cc57d11a6c9c67fee9dee949a3e1bedb2
# Dataset Card for "metamath-mistal-tokenized-16384" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kye/metamath-mistal-tokenized-16384
[ "region:us" ]
2023-10-05T17:27:24+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 485833040, "num_examples": 5930}], "download_size": 131269443, "dataset_size": 485833040}}
2023-10-05T17:28:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metamath-mistal-tokenized-16384" More Information needed
[ "# Dataset Card for \"metamath-mistal-tokenized-16384\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metamath-mistal-tokenized-16384\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"metamath-mistal-tokenized-16384\"\n\nMore Information needed" ]
cb557013d08b5f459ad749115be7002ab0ad83c3
# Portuguese Varieties Identification This repository contains the code for the paper "Enhancing Portuguese Varieties Identification with Domain-Agnostic Ensemble Approaches," submitted to EACL 2024. In this README, you can find more information about the corpus created to support the training of a model to identify the Portuguese variety of a given text. The corpus is composed of four million documents across six textual domains (law, literature, news, politics, social media, web). In terms of models, we covered three types of techniques: a) a baseline model using N-Grams and Naive Bayes; b) a model using a pre-trained language model (BERT); c) Anomaly-based language identification using autoencoders. To mitigate the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation. The work developed in this repository is part of the initiative **anonymized for EACL** ### Quickstart ``` # In /benchmarks folder 1. Install the requirements pip install -r requirements.txt 2. Run the benchmarking script ./run.sh ``` ### Corpus The developed corpus is a composition of pre-existing datasets initially created for other NLP tasks that provide permissive licenses. The first release of the corpus is available on [Huggingface](https://huggingface.co/datasets/Random-Mary-Smith/port_data_random). #### Data Sources The corpus consists of the following datasets: <p align="center"> <table> <tr> <th>Domain</th> <th>Variety</th> <th>Dataset</th> <th>Original Task</th> <th># Docs</th> <th>License</th> <th>Silver Labeled</th> </tr> <tr> <td rowspan="5">Literature</td> <td rowspan="3">PT-PT</td> <td><a href="http://arquivopessoa.net/">Arquivo Pessoa</a></td> <td>-</td> <td>~4k</td> <td>CC</td> <td>✔</td> </tr> <tr> <td><a href="https://www.gutenberg.org/ebooks/bookshelf/99">Gutenberg Project</a></td> <td>-</td> <td>6</td> <td>CC</td> <td>✔</td> </tr> <tr> <td><a href="https://www.clul.ulisboa.pt/recurso/corpus-de-textos-literarios">LT-Corpus</a></td> <td>-</td> <td>56</td> <td>ELRA END USER</td> <td>✘</td> </tr> <tr> <td rowspan="2">PT-BR</td> <td><a href="https://www.kaggle.com/datasets/rtatman/brazilian-portuguese-literature-corpus">Brazilian Literature</a></td> <td>Author Identification</td> <td>81</td> <td>CC</td> <td>✘</td> </tr> <tr> <td>LT-Corpus</td> <td>-</td> <td>8</td> <td>ELRA END USER</td> <td>✘</td> </tr> <tr> <td rowspan="2">Politics</td> <td>PT-PT</td> <td><a href="http://www.statmt.org/europarl/">Koehn (2005) Europarl</a></td> <td>Machine Translation</td> <td>~10k</td> <td>CC</td> <td>✘</td> </tr> <tr> <td>PT-BR</td> <td>Brazilian Senate Speeches</td> <td>-</td> <td>~5k</td> <td>CC</td> <td>✔</td> </tr> <tr> <td rowspan="2">Journalistic</td> <td>PT-PT</td> <td><a href="https://www.linguateca.pt/CETEMPublico/">CETEM Público</a></td> <td>-</td> <td>1M</td> <td>CC</td> <td>✘</td> </tr> <tr> <td>PT-BR</td> <td><a href="https://www.linguateca.pt/CETEMFolha/">CETEM Folha</a></td> <td>-</td> <td>272k</td> <td>CC</td> <td>✘</td> </tr> <tr> <td rowspan="3">Social Media</td> <td>PT-PT</td> <td><a href="https://www.aclweb.org/anthology/2021.ranlp-1.37/">Ramalho (2021)</a></td> <td>Fake News Detection</td> <td>2M</td> <td>MIT</td> <td>✔</td> </tr> <tr> <td rowspan="2">PT-BR</td> <td><a href="https://www.aclweb.org/anthology/2022.lrec-1.322/">Vargas (2022)</a></td> <td>Hate Speech Detection</td> <td>5k</td> <td>CC-BY-NC-4.0</td> <td>✘</td> </tr> <tr> <td><a href="https://www.aclweb.org/anthology/2021.wlp-1.72/">Cunha 
(2021)</a></td> <td>Fake News Detection</td> <td>2k</td> <td>GPL-3.0 license</td> <td>✔</td> </tr> <tr> <td>Web</td> <td>BOTH</td> <td><a href="https://www.aclweb.org/anthology/2020.lrec-1.451/">Ortiz-Suarez (2020)</a></td> <td>-</td> <td>10k</td> <td>CC</td> <td>✔</td> </tr> </table> </p> <p align="center"> <em>Table 1: Data Sources</em> </p> ##### Note: The dataset "Brazilian Senate Speeches" was created by the authors of this paper, using web crawling of the Brazilian Senate website and is available in the Huggingface repository. #### Annotation Schema & Data Preprocessing Pipeline We leveraged our knowledge of the Portuguese language to identify data sources that guaranteed mono-variety documents. However, this first release lacks any kind of supervision, so we cannot guarantee that all documents are mono-variety. In the future, we plan to release a second version of the corpus with a more robust annotation schema, combining automatic and manual annotation. To improve the quality of the corpus, we applied a preprocessing pipeline to all documents. The pipeline consists of the following steps: 1. Remove all NaN values. 2. Remove all empty documents. 3. Remove all duplicated documents. 4. Apply the [clean_text](https://github.com/jfilter/clean-text) library to remove non-relevant information for language identification from the documents. 5. Remove all documents with a length significantly more than two standard deviations from the mean length of the documents in the corpus. The pipeline is illustrated in Figure 1. <p align="center"> <img src="assets/pipeline_lid.jpg" alt="Image Description"> </p> <p align="center"> <em>Figure 1: Data Pre-Processing Pipeline</em> </p> #### Class Distribution The class distribution of the corpus is presented in Table 2. The corpus is highly imbalanced, with the majority of the documents being from the journalistic domain. In the future, we plan to release a second version of the corpus with a more balanced distribution across the six domains. Depending on the imbalance of the textual domain, we used different strategies to perform train-validation-test splits. For the heavily imbalanced domains, we ensured a minimum of 100 documents for validation and 400 for testing. In the other domains, we applied a stratified split. <p align="center"> <table> <tr> <th>Domain</th> <th># PT-PT</th> <th># PT-BR</th> <th>Stratified</th> </tr> <tr> <td>Politics</td> <td>6500</td> <td>4894</td> <td>&#10003;</td> </tr> <tr> <td>Web</td> <td>7960</td> <td>21592</td> <td>&#10003;</td> </tr> <tr> <td>Literature</td> <td>18282</td> <td>2772</td> <td>&#10003;</td> </tr> <tr> <td>Law</td> <td>392839</td> <td>5766</td> <td>&#10005;</td> </tr> <tr> <td>Journalistic</td> <td>1494494</td> <td>354180</td> <td>&#10003;</td> </tr> <tr> <td>Social Media</td> <td>2013951</td> <td>6222</td> <td>&#10005;</td> </tr> </table> </p> <p align="center"> <em>Table 2: Class Balance across the six textual domains in both varieties of Portuguese.</em> </p> #### Future Releases & How to Contribute We plan to release a second version of this corpus considering more textual domains and extending the scope to other Portuguese varieties. If you want to contribute to this corpus, please [contact us](). ### Models We explored three Machine Learning based techniques founded on the corpus compiled to present a reliable language identification model capable of operating in a real-world scenario, independent of the textual domain. 
The three techniques are: * A baseline model using N-Grams and Naive Bayes; * A model using a pre-trained language model (BERT); * Anomaly-based language identification using autoencoders. To mitigate the impact of the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation. #### Baseline Model The baseline model is a Naive Bayes classifier trained on the TF-IDF representation of the documents. The model is trained using the [scikit-learn](https://scikit-learn.org/stable/) library. After performing a grid search to find the best hyperparameters, we obtained the following results: <table align="center"> <tr> <th>Tokenizer</th> <th># Features</th> <th>max_df</th> <th>Lowercase</th> <th>Stop_words</th> <th>Token_pattern</th> <th>Ngram_range</th> <th>Analyzer Algorithm</th> </tr> <tr> <td>NLTK Portuguese</td> <td>40000</td> <td>1.0</td> <td>False</td> <td>NLTK Stopwords</td> <td>None</td> <td>(1, 2)</td> <td>word</td> </tr> <tr> <td>NLTK Portuguese</td> <td>30000</td> <td>1.0</td> <td>False</td> <td>NLTK Stopwords</td> <td>None</td> <td>(1, 5)</td> <td>char_wb</td> </tr> </table> <p align="center"> Table 3: Hyperparameters of the baseline model. </p> The F1-scores obtained by this technique are presented in Figure 2. The architecture struggles to generalize outside the domains used for training, compromising the performance of the model in a real-world scenario. <p align="center"> <img src="assets/n_grams_isolated.jpg" alt="Image Description" style="width:70%;"> </p> <p align="center"> <em>Figure 2: F1-Scores N-Grams based Model</em> </p> #### Autoencoder Model The autoencoder model proposes an anomaly-detection approach to language identification. The model is composed of encoder-decoder feed-forward layers trained using BERTimbau embeddings as input. The results obtained by this technique are presented in Figure 3. This model presents intermediate results between the baseline model and the BERT model. <p align="center"> <img src="assets/autoencoder_isolated.jpg" alt="Image Description" style="width:70%;"> </p> <p align="center"> <em>Figure 3: F1-Scores Autoencoder based Model</em> </p> #### BERT Model The BERT model is a fine-tuned version of [BERTimbau](https://huggingface.co/neuralmind/bert-base-portuguese-cased) on the compiled corpus. The model is trained using the [Huggingface](https://huggingface.co/) library. The results obtained by this technique are presented in Figure 4. This model is capable of generalizing to unseen domains, making it a good candidate for a real-world scenario. <p align="center"> <img src="assets/bert_isolated.jpg" alt="Image Description" style="width:70%;"> </p> <p align="center"> <em>Figure 4: F1-Scores BERT based Model</em> </p>
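As a concrete companion to the baseline description above, the following is a minimal sketch rather than the authors' code: it loads one domain configuration of the corpus from the Hub (the repository id and the `text`/`label` columns come from this record's metadata; the choice of the `news` config, of `MultinomialNB`, and of macro-F1 as the metric are our assumptions) and trains the word-level TF-IDF + Naive Bayes variant from Table 3.

```python
# Hedged sketch of the word-level baseline in Table 3: TF-IDF features
# (40k max features, 1-2 grams, NLTK Portuguese stopwords, no lowercasing)
# feeding a Naive Bayes classifier.
import nltk
from datasets import load_dataset
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

nltk.download("stopwords")
pt_stopwords = nltk.corpus.stopwords.words("portuguese")

# Any of the six configs (law, literature, news, politics, social_media, web) should work;
# "news" is used here purely as an example and is large, so consider subsampling.
corpus = load_dataset("Random-Mary-Smith/port_data_random", "news")
train, test = corpus["train"], corpus["test"]

baseline = make_pipeline(
    TfidfVectorizer(
        max_features=40_000,   # Table 3, word-level configuration
        max_df=1.0,
        lowercase=False,
        stop_words=pt_stopwords,
        ngram_range=(1, 2),
        analyzer="word",
    ),
    MultinomialNB(),
)

baseline.fit(train["text"], train["label"])
predictions = baseline.predict(test["text"])
print("macro F1:", f1_score(test["label"], predictions, average="macro"))
```

Swapping `analyzer="word"`, `ngram_range=(1, 2)` and 40,000 features for `char_wb`, `(1, 5)` and 30,000 gives the second configuration listed in Table 3.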
Random-Mary-Smith/port_data_random
[ "size_categories:1M<n<10M", "language:pt", "license:mit", "doi:10.57967/hf/1278", "region:us" ]
2023-10-05T17:41:55+00:00
{"language": ["pt"], "license": "mit", "size_categories": ["1M<n<10M"], "pretty_name": "Portuguese Language Identification", "dataset_info": [{"config_name": "law", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 123139395, "num_examples": 397405}, {"name": "validation", "num_bytes": 56663, "num_examples": 200}, {"name": "test", "num_bytes": 271938, "num_examples": 1000}], "download_size": 163260550, "dataset_size": 123467996}, {"config_name": "literature", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 3517766, "num_examples": 10315}, {"name": "validation", "num_bytes": 1484637, "num_examples": 4422}, {"name": "test", "num_bytes": 2125655, "num_examples": 6317}], "download_size": 9807283, "dataset_size": 7128058}, {"config_name": "news", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 641932369, "num_examples": 905849}, {"name": "validation", "num_bytes": 275126414, "num_examples": 388222}, {"name": "test", "num_bytes": 393251206, "num_examples": 554603}], "download_size": 1816074839, "dataset_size": 1310309989}, {"config_name": "politics", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 189377492, "num_examples": 5582}, {"name": "validation", "num_bytes": 72061567, "num_examples": 2393}, {"name": "test", "num_bytes": 103743651, "num_examples": 3419}], "download_size": 479587978, "dataset_size": 365182710}, {"config_name": "social_media", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 253714655, "num_examples": 2019173}, {"name": "validation", "num_bytes": 43449, "num_examples": 400}, {"name": "test", "num_bytes": 65642, "num_examples": 600}], "download_size": 345744861, "dataset_size": 253823746}, {"config_name": "web", "features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pt-PT", "1": "pt-BR"}}}}], "splits": [{"name": "train", "num_bytes": 59528182, "num_examples": 14480}, {"name": "validation", "num_bytes": 26157753, "num_examples": 6206}, {"name": "test", "num_bytes": 35875967, "num_examples": 8866}], "download_size": 164136819, "dataset_size": 121561902}]}
2023-11-29T16:15:35+00:00
[]
[ "pt" ]
TAGS #size_categories-1M<n<10M #language-Portuguese #license-mit #doi-10.57967/hf/1278 #region-us
Portuguese Varieties Identification =================================== This repository contains the code for the paper "Enhancing Portuguese Varieties Identification with Domain-Agnostic Ensemble Approaches," submitted to EACL 2024. In this README, you can find more information about the corpus created to support the training of a model to identify the Portuguese variety of a given text. The corpus is composed of four million documents across six textual domains (law, literature, news, politics, social media, web). In terms of models, we covered three types of techniques: a) a baseline model using N-Grams and Naive Bayes; b) a model using a pre-trained language model (BERT); c) Anomaly-based language identification using autoencoders. To mitigate the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation. The work developed in this repository is part of the initiative anonymized for EACL ### Quickstart ### Corpus The developed corpus is a composition of pre-existing datasets initially created for other NLP tasks that provide permissive licenses. The first release of the corpus is available on Huggingface. #### Data Sources The corpus consists of the following datasets: *Table 1: Data Sources* ##### Note: The dataset "Brazilian Senate Speeches" was created by the authors of this paper, using web crawling of the Brazilian Senate website and is available in the Huggingface repository. #### Annotation Schema & Data Preprocessing Pipeline We leveraged our knowledge of the Portuguese language to identify data sources that guaranteed mono-variety documents. However, this first release lacks any kind of supervision, so we cannot guarantee that all documents are mono-variety. In the future, we plan to release a second version of the corpus with a more robust annotation schema, combining automatic and manual annotation. To improve the quality of the corpus, we applied a preprocessing pipeline to all documents. The pipeline consists of the following steps: 1. Remove all NaN values. 2. Remove all empty documents. 3. Remove all duplicated documents. 4. Apply the clean\_text library to remove non-relevant information for language identification from the documents. 5. Remove all documents with a length significantly more than two standard deviations from the mean length of the documents in the corpus. The pipeline is illustrated in Figure 1. ![Image Description](assets/pipeline_lid.jpg) *Figure 1: Data Pre-Processing Pipeline* #### Class Distribution The class distribution of the corpus is presented in Table 2. The corpus is highly imbalanced, with the majority of the documents being from the journalistic domain. In the future, we plan to release a second version of the corpus with a more balanced distribution across the six domains. Depending on the imbalance of the textual domain, we used different strategies to perform train-validation-test splits. For the heavily imbalanced domains, we ensured a minimum of 100 documents for validation and 400 for testing. In the other domains, we applied a stratified split. *Table 2: Class Balance across the six textual domains in both varieties of Portuguese.* #### Future Releases & How to Contribute We plan to release a second version of this corpus considering more textual domains and extending the scope to other Portuguese varieties. If you want to contribute to this corpus, please contact us. 
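To make the five-step preprocessing pipeline described above concrete, a rough sketch is given below; it is an illustration under stated assumptions (a pandas DataFrame with a `text` column, and specific clean-text flags the card does not spell out), not the project's actual preprocessing code.

```python
# Hedged sketch of the five preprocessing steps described above.
import pandas as pd
from cleantext import clean  # the clean-text library referenced in the card


def preprocess(df: pd.DataFrame) -> pd.DataFrame:
    df = df.dropna(subset=["text"])                   # 1. remove NaN values
    df = df[df["text"].str.strip().astype(bool)]      # 2. remove empty documents
    df = df.drop_duplicates(subset=["text"]).copy()   # 3. remove duplicated documents
    # 4. strip content irrelevant for language identification (flags are assumptions)
    df["text"] = df["text"].map(lambda t: clean(t, no_urls=True, no_emails=True))
    # 5. drop documents far longer than typical (mean + 2 standard deviations)
    lengths = df["text"].str.len()
    return df[lengths <= lengths.mean() + 2 * lengths.std()]
```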
### Models We explored three Machine Learning based techniques founded on the corpus compiled to present a reliable language identification model capable of operating in a real-world scenario, independent of the textual domain. The three techniques are: * A baseline model using N-Grams and Naive Bayes; * A model using a pre-trained language model (BERT); * Anomaly-based language identification using autoencoders. To mitigate the impact of the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation. #### Baseline Model The baseline model is a Naive Bayes classifier trained on the TF-IDF representation of the documents. The model is trained using the scikit-learn library. After performing a grid search to find the best hyperparameters, we obtained the following results: | Tokenizer | # Features | max\_df | Lowercase | Stop\_words | Token\_pattern | Ngram\_range | Analyzer Algorithm | | --- | --- | --- | --- | --- | --- | --- | --- | | NLTK Portuguese | 40000 | 1.0 | False | NLTK Stopwords | None | (1, 2) | word | | NLTK Portuguese | 30000 | 1.0 | False | NLTK Stopwords | None | (1, 5) | char\_wb | Table 3: Hyperparameters of the baseline model. The F1-scores obtained by this technique are presented in Figure 2. The architecture struggles to generalize outside the domains used for training, compromising the performance of the model in a real-world scenario. ![Image Description](assets/n_grams_isolated.jpg) *Figure 2: F1-Scores N-Grams based Model* #### Autoencoder Model The autoencoder model proposes an anomaly-detection approach to language identification. The model is composed of encoder-decoder feed-forward layers trained using BERTimbau embeddings as input. The results obtained by this technique are presented in Figure 3. This model presents intermediate results between the baseline model and the BERT model. ![Image Description](assets/autoencoder_isolated.jpg) *Figure 3: F1-Scores Autoencoder based Model* #### BERT Model The BERT model is a fine-tuned version of BERTimbau on the compiled corpus. The model is trained using the Huggingface library. The results obtained by this technique are presented in Figure 4. This model is capable of generalizing to unseen domains, making it a good candidate for a real-world scenario. ![Image Description](assets/bert_isolated.jpg) *Figure 4: F1-Scores BERT based Model*
[ "### Quickstart", "### Corpus\n\n\nThe developed corpus is a composition of pre-existing datasets initially created for other NLP tasks that provide permissive licenses. The first release of the corpus is available on Huggingface.", "#### Data Sources\n\n\nThe corpus consists of the following datasets:\n\n\n\n\n\n\n\n\n*Table 1: Data Sources*", "##### \n\n\nNote: The dataset \"Brazilian Senate Speeches\" was created by the authors of this paper, using web crawling of the Brazilian Senate website and is available in the Huggingface repository.", "#### Annotation Schema & Data Preprocessing Pipeline\n\n\nWe leveraged our knowledge of the Portuguese language to identify data sources that guaranteed mono-variety documents. However, this first release lacks any kind of supervision, so we cannot guarantee that all documents are mono-variety. In the future, we plan to release a second version of the corpus with a more robust annotation schema, combining automatic and manual annotation.\n\n\nTo improve the quality of the corpus, we applied a preprocessing pipeline to all documents. The pipeline consists of the following steps:\n\n\n1. Remove all NaN values.\n2. Remove all empty documents.\n3. Remove all duplicated documents.\n4. Apply the clean\\_text library to remove non-relevant information for language identification from the documents.\n5. Remove all documents with a length significantly more than two standard deviations from the mean length of the documents in the corpus.\n\n\nThe pipeline is illustrated in Figure 1.\n\n\n\n![Image Description](assets/pipeline_lid.jpg)\n\n\n\n\n*Figure 1: Data Pre-Processing Pipeline*", "#### Class Distribution\n\n\nThe class distribution of the corpus is presented in Table 2. The corpus is highly imbalanced, with the majority of the documents being from the journalistic domain. In the future, we plan to release a second version of the corpus with a more balanced distribution across the six domains. Depending on the imbalance of the textual domain, we used different strategies to perform train-validation-test splits. For the heavily imbalanced domains, we ensured a minimum of 100 documents for validation and 400 for testing. In the other domains, we applied a stratified split.\n\n\n\n\n\n\n\n\n*Table 2: Class Balance across the six textual domains in both varieties of Portuguese.*", "#### Future Releases & How to Contribute\n\n\nWe plan to release a second version of this corpus considering more textual domains and extending the scope to other Portuguese varieties. If you want to contribute to this corpus, please contact us.", "### Models\n\n\nWe explored three Machine Learning based techniques founded on the corpus compiled to present a reliable language identification model capable of operating in a real-world scenario, independent of the textual domain. The three techniques are:\n\n\n* A baseline model using N-Grams and Naive Bayes;\n* A model using a pre-trained language model (BERT);\n* Anomaly-based language identification using autoencoders.\n\n\nTo mitigate the impact of the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation.", "#### Baseline Model\n\n\nThe baseline model is a Naive Bayes classifier trained on the TF-IDF representation of the documents. The model is trained using the scikit-learn library. 
After performing a grid search to find the best hyperparameters, we obtained the following results:\n\n\n\n\n| Tokenizer | # Features | max\\_df | Lowercase | Stop\\_words | Token\\_pattern | Ngram\\_range | Analyzer Algorithm |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| NLTK Portuguese | 40000 | 1.0 | False | NLTK Stopwords | None | (1, 2) | word |\n| NLTK Portuguese | 30000 | 1.0 | False | NLTK Stopwords | None | (1, 5) | char\\_wb |\n\n\n\nTable 3: Hyperparameters of the baseline model.\n\n\n\nThe F1-scores obtained by this technique are presented in Figure 2. The architecture strugles to generalize outside the domains used for training, compromising the performance of the model in a real-world scenario.\n\n\n\n![Image Description](assets/n_grams_isolated.jpg)\n\n\n\n\n*Figure 2: F1-Scores N-Grams based Model*", "#### Autoencoder Model\n\n\nThe autoencoder model proposes a anomaly-detection approach to language identification. The model is composed of encoder-decoder feed-foward layers trained using BERTimbau embeddings as input. The results obtained by this technique are presented in Figure 3. This model presents intermidiate results between the baseline model and the BERT model.\n\n\n\n![Image Description](assets/autoencoder_isolated.jpg)\n\n\n\n\n*Figure 3: F1-Scores Autoencoder based Model*", "#### BERT Model\n\n\nThe BERT model is a fine-tuned version of BERTimbau on the corpus compiled. The model is trained using the Huggingface library. The results obtained by this technique are presented in Figure 4. This model is capable of generalizing to unseen domains, making it a good candidate for a real-world scenario.\n\n\n\n![Image Description](assets/bert_isolated.jpg)\n\n\n\n\n*Figure 4: F1-Scores BERT based Model*" ]
[ "TAGS\n#size_categories-1M<n<10M #language-Portuguese #license-mit #doi-10.57967/hf/1278 #region-us \n", "### Quickstart", "### Corpus\n\n\nThe developed corpus is a composition of pre-existing datasets initially created for other NLP tasks that provide permissive licenses. The first release of the corpus is available on Huggingface.", "#### Data Sources\n\n\nThe corpus consists of the following datasets:\n\n\n\n\n\n\n\n\n*Table 1: Data Sources*", "##### \n\n\nNote: The dataset \"Brazilian Senate Speeches\" was created by the authors of this paper, using web crawling of the Brazilian Senate website and is available in the Huggingface repository.", "#### Annotation Schema & Data Preprocessing Pipeline\n\n\nWe leveraged our knowledge of the Portuguese language to identify data sources that guaranteed mono-variety documents. However, this first release lacks any kind of supervision, so we cannot guarantee that all documents are mono-variety. In the future, we plan to release a second version of the corpus with a more robust annotation schema, combining automatic and manual annotation.\n\n\nTo improve the quality of the corpus, we applied a preprocessing pipeline to all documents. The pipeline consists of the following steps:\n\n\n1. Remove all NaN values.\n2. Remove all empty documents.\n3. Remove all duplicated documents.\n4. Apply the clean\\_text library to remove non-relevant information for language identification from the documents.\n5. Remove all documents with a length significantly more than two standard deviations from the mean length of the documents in the corpus.\n\n\nThe pipeline is illustrated in Figure 1.\n\n\n\n![Image Description](assets/pipeline_lid.jpg)\n\n\n\n\n*Figure 1: Data Pre-Processing Pipeline*", "#### Class Distribution\n\n\nThe class distribution of the corpus is presented in Table 2. The corpus is highly imbalanced, with the majority of the documents being from the journalistic domain. In the future, we plan to release a second version of the corpus with a more balanced distribution across the six domains. Depending on the imbalance of the textual domain, we used different strategies to perform train-validation-test splits. For the heavily imbalanced domains, we ensured a minimum of 100 documents for validation and 400 for testing. In the other domains, we applied a stratified split.\n\n\n\n\n\n\n\n\n*Table 2: Class Balance across the six textual domains in both varieties of Portuguese.*", "#### Future Releases & How to Contribute\n\n\nWe plan to release a second version of this corpus considering more textual domains and extending the scope to other Portuguese varieties. If you want to contribute to this corpus, please contact us.", "### Models\n\n\nWe explored three Machine Learning based techniques founded on the corpus compiled to present a reliable language identification model capable of operating in a real-world scenario, independent of the textual domain. The three techniques are:\n\n\n* A baseline model using N-Grams and Naive Bayes;\n* A model using a pre-trained language model (BERT);\n* Anomaly-based language identification using autoencoders.\n\n\nTo mitigate the impact of the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation.", "#### Baseline Model\n\n\nThe baseline model is a Naive Bayes classifier trained on the TF-IDF representation of the documents. The model is trained using the scikit-learn library. 
After performing a grid search to find the best hyperparameters, we obtained the following results:\n\n\n\n\n| Tokenizer | # Features | max\\_df | Lowercase | Stop\\_words | Token\\_pattern | Ngram\\_range | Analyzer Algorithm |\n| --- | --- | --- | --- | --- | --- | --- | --- |\n| NLTK Portuguese | 40000 | 1.0 | False | NLTK Stopwords | None | (1, 2) | word |\n| NLTK Portuguese | 30000 | 1.0 | False | NLTK Stopwords | None | (1, 5) | char\\_wb |\n\n\n\nTable 3: Hyperparameters of the baseline model.\n\n\n\nThe F1-scores obtained by this technique are presented in Figure 2. The architecture strugles to generalize outside the domains used for training, compromising the performance of the model in a real-world scenario.\n\n\n\n![Image Description](assets/n_grams_isolated.jpg)\n\n\n\n\n*Figure 2: F1-Scores N-Grams based Model*", "#### Autoencoder Model\n\n\nThe autoencoder model proposes a anomaly-detection approach to language identification. The model is composed of encoder-decoder feed-foward layers trained using BERTimbau embeddings as input. The results obtained by this technique are presented in Figure 3. This model presents intermidiate results between the baseline model and the BERT model.\n\n\n\n![Image Description](assets/autoencoder_isolated.jpg)\n\n\n\n\n*Figure 3: F1-Scores Autoencoder based Model*", "#### BERT Model\n\n\nThe BERT model is a fine-tuned version of BERTimbau on the corpus compiled. The model is trained using the Huggingface library. The results obtained by this technique are presented in Figure 4. This model is capable of generalizing to unseen domains, making it a good candidate for a real-world scenario.\n\n\n\n![Image Description](assets/bert_isolated.jpg)\n\n\n\n\n*Figure 4: F1-Scores BERT based Model*" ]
[ 41, 4, 46, 24, 49, 235, 156, 53, 135, 331, 123, 110 ]
[ "passage: TAGS\n#size_categories-1M<n<10M #language-Portuguese #license-mit #doi-10.57967/hf/1278 #region-us \n### Quickstart### Corpus\n\n\nThe developed corpus is a composition of pre-existing datasets initially created for other NLP tasks that provide permissive licenses. The first release of the corpus is available on Huggingface.#### Data Sources\n\n\nThe corpus consists of the following datasets:\n\n\n\n\n\n\n\n\n*Table 1: Data Sources*##### \n\n\nNote: The dataset \"Brazilian Senate Speeches\" was created by the authors of this paper, using web crawling of the Brazilian Senate website and is available in the Huggingface repository.#### Annotation Schema & Data Preprocessing Pipeline\n\n\nWe leveraged our knowledge of the Portuguese language to identify data sources that guaranteed mono-variety documents. However, this first release lacks any kind of supervision, so we cannot guarantee that all documents are mono-variety. In the future, we plan to release a second version of the corpus with a more robust annotation schema, combining automatic and manual annotation.\n\n\nTo improve the quality of the corpus, we applied a preprocessing pipeline to all documents. The pipeline consists of the following steps:\n\n\n1. Remove all NaN values.\n2. Remove all empty documents.\n3. Remove all duplicated documents.\n4. Apply the clean\\_text library to remove non-relevant information for language identification from the documents.\n5. Remove all documents with a length significantly more than two standard deviations from the mean length of the documents in the corpus.\n\n\nThe pipeline is illustrated in Figure 1.\n\n\n\n![Image Description](assets/pipeline_lid.jpg)\n\n\n\n\n*Figure 1: Data Pre-Processing Pipeline*", "passage: #### Class Distribution\n\n\nThe class distribution of the corpus is presented in Table 2. The corpus is highly imbalanced, with the majority of the documents being from the journalistic domain. In the future, we plan to release a second version of the corpus with a more balanced distribution across the six domains. Depending on the imbalance of the textual domain, we used different strategies to perform train-validation-test splits. For the heavily imbalanced domains, we ensured a minimum of 100 documents for validation and 400 for testing. In the other domains, we applied a stratified split.\n\n\n\n\n\n\n\n\n*Table 2: Class Balance across the six textual domains in both varieties of Portuguese.*#### Future Releases & How to Contribute\n\n\nWe plan to release a second version of this corpus considering more textual domains and extending the scope to other Portuguese varieties. If you want to contribute to this corpus, please contact us.### Models\n\n\nWe explored three Machine Learning based techniques founded on the corpus compiled to present a reliable language identification model capable of operating in a real-world scenario, independent of the textual domain. The three techniques are:\n\n\n* A baseline model using N-Grams and Naive Bayes;\n* A model using a pre-trained language model (BERT);\n* Anomaly-based language identification using autoencoders.\n\n\nTo mitigate the impact of the variability introduced by the different domains, we used an ensemble approach to combine the predictions of domain-specialized models trained in isolation." ]
9d0c0304376805eecf306c166040b700e2cf29b1
# Dataset Card for "arxiv_abstracts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
librarian-bots/arxiv_abstracts
[ "region:us" ]
2023-10-05T17:42:24+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "probability", "dtype": "float64"}, {"name": "arxiv_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 715878, "num_examples": 500}], "download_size": 411327, "dataset_size": 715878}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-12T08:26:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arxiv_abstracts" More Information needed
[ "# Dataset Card for \"arxiv_abstracts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arxiv_abstracts\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arxiv_abstracts\"\n\nMore Information needed" ]
26e00ab7119691154297362e1ee84478dffbdfc6
# Dataset Card for "multiple-subject-gen" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chats-bug/multiple-subject-gen
[ "region:us" ]
2023-10-05T18:18:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "subject_lines", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78493229, "num_examples": 59489}, {"name": "test", "num_bytes": 4030472, "num_examples": 3132}], "download_size": 10833380, "dataset_size": 82523701}}
2023-10-06T05:30:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "multiple-subject-gen" More Information needed
[ "# Dataset Card for \"multiple-subject-gen\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"multiple-subject-gen\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"multiple-subject-gen\"\n\nMore Information needed" ]
d75254ea2c41b48efa3b291ec2c0a63aaf3c53c7
# Bangumi Image Base of Tate No Yuusha No Nariagari This is the image base of bangumi Tate no Yuusha no Nariagari, we detected 50 characters, 4925 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 264 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 62 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 18 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 52 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 16 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 93 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 1176 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 89 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 29 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 
6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 111 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 33 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 47 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 35 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 33 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 42 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 24 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 13 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 170 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 244 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 82 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 17 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 
3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 87 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 19 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 21 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 9 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 746 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 113 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 192 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 66 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 105 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 23 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 13 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 
11 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 17 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 53 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 9 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 13 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 17 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 49 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 13 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 12 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 8 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 170 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 6 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | 
![preview 6](43/preview_6.png) | N/A | N/A | | 44 | 13 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 13 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 7 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | N/A | | 47 | 5 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | N/A | N/A | N/A | | 48 | 46 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | noise | 419 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
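As a side note on the recommendation above: the card links per-character `dataset.zip` archives (and an `all.zip` bundle) and suggests pruning noisy samples before training. A minimal, hypothetical sketch of that workflow with `huggingface_hub` could look like the following — the repo id and filename are assumptions taken from the card's download links, and the actual noise filtering is left to manual review:

```python
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

# Download the archive for character #0 (repo id and filename are assumptions
# based on the download links shown in the card above).
archive = hf_hub_download(
    repo_id="BangumiBase/tatenoyuushanonariagari",
    filename="0/dataset.zip",
    repo_type="dataset",
)

# Unpack the images into a local folder so they can be reviewed before training.
out_dir = Path("tate_character_0")
out_dir.mkdir(exist_ok=True)
with zipfile.ZipFile(archive) as zf:
    zf.extractall(out_dir)

# The card estimates roughly 1% noisy samples, so inspect (or automatically
# filter) the extracted images before using them as training data.
n_files = len([p for p in out_dir.rglob("*") if p.is_file()])
print(f"Extracted {n_files} files to {out_dir}")
```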
BangumiBase/tatenoyuushanonariagari
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T18:20:42+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T21:00:17+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Tate No Yuusha No Nariagari ================================================= This is the image base of bangumi Tate no Yuusha no Nariagari, in which we detected 50 characters and 4925 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
8e420e61b5836014ba787673bf661b50c6abed8d
# Bangumi Image Base of Shinmai Maou No Testament This is the image base of bangumi Shinmai Maou no Testament, we detected 35 characters, 3166 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 811 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 58 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 67 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 24 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 49 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 14 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 19 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 6 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | N/A | N/A | | 8 | 9 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 
8](8/preview_8.png) | | 9 | 11 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 31 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 58 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 120 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 97 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 22 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 11 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 43 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 27 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 14 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 541 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 12 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 
5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 11 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 9 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 20 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 9 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 6 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | N/A | N/A | | 26 | 7 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | N/A | | 27 | 348 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 26 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 43 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 11 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 40 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 15 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 
4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 12 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | noise | 565 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/shinmaimaounotestament
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T18:20:56+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T20:15:54+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Shinmai Maou No Testament =============================================== This is the image base of bangumi Shinmai Maou no Testament, in which we detected 35 characters and 3166 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
517cdebf9dd3f2892af878622bb541c58a3b4f8c
# Bangumi Image Base of God Eater This is the image base of bangumi GOD EATER, we detected 23 characters, 1589 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 31 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 6 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | N/A | N/A | | 2 | 176 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 22 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 49 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 527 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 32 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 28 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 15 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 17 | 
[Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 50 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 124 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 26 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 24 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 10 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 9 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 24 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 61 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 121 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 131 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 16 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 
6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 6 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | N/A | N/A | | noise | 84 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/godeater
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T18:21:12+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T19:37:58+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of God Eater =============================== This is the image base of bangumi GOD EATER, in which we detected 23 characters and 1589 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
e748f910e51b9fc3b5f8df3e39d9444c8e7fd57d
# Dataset Card for "traindataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AryanNsc/traindataset
[ "region:us" ]
2023-10-05T18:22:45+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 98819, "num_examples": 1000}], "download_size": 58464, "dataset_size": 98819}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T18:29:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "traindataset" More Information needed
[ "# Dataset Card for \"traindataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"traindataset\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"traindataset\"\n\nMore Information needed" ]
886aba7228d9c54a1ba839986931adb77a9007b4
# Dataset Card for "jigsaw-train-es" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-toxic-comment-train-es
[ "region:us" ]
2023-10-05T18:27:29+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 98022741, "num_examples": 223378}], "download_size": 60601678, "dataset_size": 98022741}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T18:27:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-train-es" More Information needed
[ "# Dataset Card for \"jigsaw-train-es\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-train-es\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-train-es\"\n\nMore Information needed" ]
580dc69291610f229c1b9375247eaa92a58cc78f
# Dataset Card for "jigsaw-train-fr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-toxic-comment-train-fr
[ "region:us" ]
2023-10-05T18:27:39+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 104185684, "num_examples": 223394}], "download_size": 63513621, "dataset_size": 104185684}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T18:27:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-train-fr" More Information needed
[ "# Dataset Card for \"jigsaw-train-fr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-train-fr\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-train-fr\"\n\nMore Information needed" ]
d8aa55337f024bec5bf5e559f2ad38de87120b27
The purpose of this dataset is to provide question-answer (ground truth) pairs in a table format. The questions and answers were all created using langchain x gpt-4, since creating them manually would have taken a long time. However, as due diligence, I have randomly checked more than 50% of the questions and answers and judged that the dataset is safe to use. The source of these questions and answers is a private wiki page called Sustainable Methods Wiki, created by Prof. Henrik .v. Wahrden. Link: https://sustainabilitymethods.org/index.php/Main_Page
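A minimal sketch of how this dataset might be loaded and inspected with the 🤗 `datasets` library is shown below; the repo id is the one this record describes, and the column and split names are assumptions taken from the card metadata further down in this record:

```python
from datasets import load_dataset

# Load the Q&A dataset described above (repo id taken from this record).
ds = load_dataset("stepkurniawan/qa_sustainability_wiki")

# Per the record's metadata, there are 'train' (~647 rows) and 'test' (~162 rows)
# splits with 'question' and 'ground_truths' columns.
print(ds)

sample = ds["train"][0]
print("Q:", sample["question"])
print("A:", sample["ground_truths"])
```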
stepkurniawan/qa_sustainability_wiki
[ "license:mit", "region:us" ]
2023-10-05T18:45:20+00:00
{"license": "mit", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "ground_truths", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 195625.12855377008, "num_examples": 647}, {"name": "test", "num_bytes": 48981.87144622991, "num_examples": 162}], "download_size": 149066, "dataset_size": 244607.0}}
2023-10-05T19:51:30+00:00
[]
[]
TAGS #license-mit #region-us
The purpose of this dataset is to provide question-answer (ground truth) pairs in a table format. The questions and answers were all created using langchain x gpt-4, since creating them manually would have taken a long time. However, as due diligence, I have randomly checked more than 50% of the questions and answers and judged that the dataset is safe to use. The source of these questions and answers is a private wiki page called Sustainable Methods Wiki, created by Prof. Henrik .v. Wahrden. Link: URL
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
f4a6c075f5bed1ef53c3551537d964d720426e5d
# Dataset Card for "demo-new" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atulsinghphd/demo-new
[ "region:us" ]
2023-10-05T19:03:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32616.0, "num_examples": 172}, {"name": "test", "num_bytes": 8154.0, "num_examples": 43}], "download_size": 12874, "dataset_size": 40770.0}}
2023-10-05T19:03:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "demo-new" More Information needed
[ "# Dataset Card for \"demo-new\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"demo-new\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"demo-new\"\n\nMore Information needed" ]
ca599a559fdd81834a4089af3d037ce46e7d8198
# Dataset Card for "gsm8k_self_correct" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
euclaise/gsm8k_self_correct
[ "size_categories:1K<n<10K", "license:mit", "cot", "self-correct", "region:us" ]
2023-10-05T19:15:09+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "mistake", "dtype": "string"}, {"name": "correct_end", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4561402, "num_examples": 4676}], "download_size": 2528831, "dataset_size": 4561402}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["cot", "self-correct"]}
2023-10-19T19:46:04+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #cot #self-correct #region-us
# Dataset Card for "gsm8k_self_correct" More Information needed
[ "# Dataset Card for \"gsm8k_self_correct\"\n\nMore Information needed" ]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #cot #self-correct #region-us \n", "# Dataset Card for \"gsm8k_self_correct\"\n\nMore Information needed" ]
[ 30, 19 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #cot #self-correct #region-us \n# Dataset Card for \"gsm8k_self_correct\"\n\nMore Information needed" ]
c188f4a1e0868799de30b0e0e65d4ca016213412
# Dataset Card for "fill50k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
acozma/fill50k
[ "region:us" ]
2023-10-05T19:22:49+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 451820630.0, "num_examples": 50000}], "download_size": 323967497, "dataset_size": 451820630.0}}
2023-10-05T20:31:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fill50k" More Information needed
[ "# Dataset Card for \"fill50k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fill50k\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fill50k\"\n\nMore Information needed" ]
baacbff5ac14e6daf38d1c76bc1c8e42d83b1dec
# Bangumi Image Base of Sangatsu No Lion This is the image base of bangumi Sangatsu no Lion, we detected 33 characters, 3830 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1087 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 167 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 205 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 49 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 126 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 39 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 179 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 96 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 264 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 111 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 29 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 34 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 19 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 44 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 56 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 27 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 28 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 405 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 203 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 13 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 16 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 142 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 20 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 8 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 23 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 23 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 46 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 55 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 9 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 8 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 39 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 9 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | noise | 251 | 
[Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/sangatsunolion
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T19:54:08+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T22:19:37+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Sangatsu No Lion ====================================== This is the image base of bangumi Sangatsu no Lion, in which we detected 33 characters and 3830 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may actually contain noise. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
dba2897820c01d6bd887101a64d2e6544c509d0a
# Bangumi Image Base of Mahou Tsukai No Yome This is the image base of bangumi Mahou Tsukai no Yome, we detected 28 characters, 1731 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 899 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 20 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 11 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 44 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 39 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 19 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 19 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 17 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 20 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 37 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 20 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 15 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 13 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 14 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 85 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 14 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 34 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 60 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 58 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 5 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | N/A | N/A | N/A | | 20 | 19 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 
6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 13 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 18 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 12 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 16 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 7 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | N/A | | 26 | 21 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | noise | 182 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/mahoutsukainoyome
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-05T20:09:24+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-05T21:27:53+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Mahou Tsukai No Yome ========================================== This is the image base of the bangumi Mahou Tsukai no Yome; we detected 28 characters and 1731 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may be noisy. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
035d53756984e38e3f557e394c907154a76a2347
# Dataset Card for "LSC_acronyms_topic_vectors" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tomashs/LSC_acronyms_topic_vectors
[ "region:us" ]
2023-10-05T20:27:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "short_form", "dtype": "string"}, {"name": "long_form", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "topic_vector", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 1959752089, "num_examples": 352720}, {"name": "validation", "num_bytes": 418571627, "num_examples": 75339}, {"name": "test", "num_bytes": 419813918, "num_examples": 75540}], "download_size": 2198337547, "dataset_size": 2798137634}}
2023-10-05T20:38:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LSC_acronyms_topic_vectors" More Information needed
[ "# Dataset Card for \"LSC_acronyms_topic_vectors\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LSC_acronyms_topic_vectors\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LSC_acronyms_topic_vectors\"\n\nMore Information needed" ]
30086c760dd66a6954ad29ff9c7ee1353217abbf
# Dataset Card for "DedupedRefDatasetWMetric" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Fraol/DedupedRefDatasetWMetric
[ "region:us" ]
2023-10-05T21:13:56+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "path_name", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "ref_type", "dtype": "string"}, {"name": "ref_status", "dtype": "string"}, {"name": "hash", "dtype": "string"}, {"name": "class_name", "dtype": "string"}, {"name": "method_name", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}, {"name": "cbo", "dtype": "float64"}, {"name": "wmc", "dtype": "float64"}, {"name": "lcom*", "dtype": "float64"}, {"name": "loc", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 2308835214, "num_examples": 385811}], "download_size": 481911376, "dataset_size": 2308835214}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T21:14:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DedupedRefDatasetWMetric" More Information needed
[ "# Dataset Card for \"DedupedRefDatasetWMetric\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DedupedRefDatasetWMetric\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"DedupedRefDatasetWMetric\"\n\nMore Information needed" ]
a23a48db652829efa4a6542f16f732f42ba47bfc
# Dataset Card for "SD-CLIP-alignment-3000-T5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Doub7e/SD-CLIP-alignment-3000-T5
[ "region:us" ]
2023-10-05T21:14:30+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "clip_pred", "dtype": "string"}, {"name": "T5_last_hidden_states", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 1414249606.0, "num_examples": 3000}], "download_size": 1392469728, "dataset_size": 1414249606.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T21:18:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SD-CLIP-alignment-3000-T5" More Information needed
[ "# Dataset Card for \"SD-CLIP-alignment-3000-T5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SD-CLIP-alignment-3000-T5\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"SD-CLIP-alignment-3000-T5\"\n\nMore Information needed" ]
9cb849d53abbe7fd7e4769db215525b7d47bc373
# t331ZZZM * [data](https://huggingface.co/datasets/feynman-integrals-nn/t331ZZZM-s12_24) * [model](https://huggingface.co/feynman-integrals-nn/t331ZZZM-dimensionless) * [source](https://gitlab.com/feynman-integrals-nn/feynman-integrals-nn/-/tree/main/t331ZZZM) Warning: deprecated dataset
feynman-integrals-nn/t331ZZZM-s12_24
[ "license:cc-by-4.0", "region:us" ]
2023-10-05T21:31:36+00:00
{"license": "cc-by-4.0"}
2023-10-17T16:20:44+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# t331ZZZM * data * model * source Warning: deprecated dataset
[ "# t331ZZZM\n\n* data\n* model\n* source\n\nWarning: deprecated dataset" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# t331ZZZM\n\n* data\n* model\n* source\n\nWarning: deprecated dataset" ]
[ 15, 22 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# t331ZZZM\n\n* data\n* model\n* source\n\nWarning: deprecated dataset" ]
59de72abd641b344034f6f04d49db0737a195d37
# heavycrossbox * [data](https://huggingface.co/datasets/feynman-integrals-nn/heavycrossbox) * [source](https://gitlab.com/feynman-integrals-nn/feynman-integrals-nn/-/tree/main/heavycrossbox)
feynman-integrals-nn/heavycrossbox
[ "license:cc-by-4.0", "region:us" ]
2023-10-05T21:54:33+00:00
{"license": "cc-by-4.0"}
2023-11-14T20:56:11+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# heavycrossbox * data * source
[ "# heavycrossbox\n\n* data\n* source" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# heavycrossbox\n\n* data\n* source" ]
[ 15, 8 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# heavycrossbox\n\n* data\n* source" ]
d6887ae6cba5c64837121431fa52b7c9556c3bc3
# topbox * [data](https://huggingface.co/datasets/feynman-integrals-nn/topbox) * [source](https://gitlab.com/feynman-integrals-nn/feynman-integrals-nn/-/tree/main/topbox)
feynman-integrals-nn/topbox
[ "license:cc-by-4.0", "region:us" ]
2023-10-05T21:54:49+00:00
{"license": "cc-by-4.0"}
2023-11-23T09:44:58+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# topbox * data * source
[ "# topbox\n\n* data\n* source" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# topbox\n\n* data\n* source" ]
[ 15, 7 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# topbox\n\n* data\n* source" ]
e51a192cccf1fb1ca8983f76326e03739a79d1a3
# Dataset Card for "ncbi_genbank_part_4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_4
[ "region:us" ]
2023-10-05T21:59:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 21065842848, "num_examples": 52663}], "download_size": 9755396603, "dataset_size": 21065842848}}
2023-10-05T22:07:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_4" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_4\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_4\"\n\nMore Information needed" ]
22a4e05a9111c436a0294342e0a89277f93581b7
# box1m * [data](https://huggingface.co/datasets/feynman-integrals-nn/box1m) * [source](https://gitlab.com/feynman-integrals-nn/feynman-integrals-nn/-/tree/main/box1m)
feynman-integrals-nn/box1m
[ "license:cc-by-4.0", "region:us" ]
2023-10-05T22:02:24+00:00
{"license": "cc-by-4.0"}
2023-10-06T09:12:45+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# box1m * data * source
[ "# box1m\n\n* data\n* source" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# box1m\n\n* data\n* source" ]
[ 15, 8 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# box1m\n\n* data\n* source" ]
4e67b7d4e9f648a39e74a8fdcdbfb6514fc7e59f
# Dataset Card for "jigsaw-unintended-bias" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-unintended-bias
[ "region:us" ]
2023-10-05T22:13:50+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 613511925, "num_examples": 1999516}], "download_size": 417235573, "dataset_size": 613511925}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T22:16:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-unintended-bias" More Information needed
[ "# Dataset Card for \"jigsaw-unintended-bias\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-unintended-bias\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-unintended-bias\"\n\nMore Information needed" ]
1d6700a8fd845125e2f908aa986d91801f768517
# Dataset Card for "hmbxp_llvm_wasm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JeremiahZ/mbxp_llvm_wasm
[ "region:us" ]
2023-10-05T22:22:30+00:00
{"dataset_info": {"features": [{"name": "task_id", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "test", "dtype": "string"}, {"name": "entry_point", "dtype": "string"}, {"name": "canonical_solution", "dtype": "string"}, {"name": "llvm_ir", "dtype": "string"}, {"name": "wat", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 13548211, "num_examples": 773}], "download_size": 2857975, "dataset_size": 13548211}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-05T22:22:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hmbxp_llvm_wasm" More Information needed
[ "# Dataset Card for \"hmbxp_llvm_wasm\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hmbxp_llvm_wasm\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hmbxp_llvm_wasm\"\n\nMore Information needed" ]
8ac2085708c83ecedbe79ff85405681cd0cfc938
# Dataset Card for "sst2_affix_pos" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/sst2_affix_pos
[ "region:us" ]
2023-10-05T22:27:32+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int32"}, {"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "positive"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "validation", "num_bytes": 8687, "num_examples": 55}], "download_size": 9498, "dataset_size": 8687}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-10-23T01:28:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sst2_affix_pos" More Information needed
[ "# Dataset Card for \"sst2_affix_pos\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sst2_affix_pos\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sst2_affix_pos\"\n\nMore Information needed" ]
a89ab76d88f704e9bc5870ac39cc9d458a2a70ac
# 📘 WikiMIA Datasets The **WikiMIA datasets** serve as a benchmark designed to evaluate membership inference attack (MIA) methods, specifically in detecting pretraining data from extensive large language models. ### 📌 Applicability The datasets can be applied to various models released between **2017 to 2023**: - LLaMA1/2 - GPT-Neo - OPT - Pythia - text-davinci-001 - text-davinci-002 - ... and more. ## Loading the datasets To load the dataset: ```python from datasets import load_dataset LENGTH = 64 dataset = load_dataset("swj0419/WikiMIA", split=f"WikiMIA_length{LENGTH}") ``` * Available Text Lengths: `32, 64, 128, 256`. * *Label 0*: Refers to the unseen data during pretraining. *Label 1*: Refers to the seen data. ## 🛠️ Codebase For evaluating MIA methods on our datasets, visit our [GitHub repository](https://github.com/swj0419/detect-pretrain-code). ## ⭐ Citing our Work If you find our codebase and datasets beneficial, kindly cite our work: ```bibtex @misc{shi2023detecting, title={Detecting Pretraining Data from Large Language Models}, author={Weijia Shi and Anirudh Ajith and Mengzhou Xia and Yangsibo Huang and Daogao Liu and Terra Blevins and Danqi Chen and Luke Zettlemoyer}, year={2023}, eprint={2310.16789}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
swj0419/WikiMIA
[ "size_categories:1K<n<10K", "language:en", "license:mit", "arxiv:2310.16789", "region:us" ]
2023-10-05T22:31:10+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "WikiMIA_length32", "num_bytes": 162091, "num_examples": 776}, {"name": "WikiMIA_length64", "num_bytes": 221018, "num_examples": 542}, {"name": "WikiMIA_length128", "num_bytes": 205118, "num_examples": 250}, {"name": "WikiMIA_length256", "num_bytes": 134879, "num_examples": 82}], "download_size": 465221, "dataset_size": 723106}}
2023-11-03T07:52:38+00:00
[ "2310.16789" ]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #license-mit #arxiv-2310.16789 #region-us
# WikiMIA Datasets The WikiMIA datasets serve as a benchmark designed to evaluate membership inference attack (MIA) methods, specifically in detecting pretraining data from extensive large language models. ### Applicability The datasets can be applied to various models released between 2017 to 2023: - LLaMA1/2 - GPT-Neo - OPT - Pythia - text-davinci-001 - text-davinci-002 - ... and more. ## Loading the datasets To load the dataset: * Available Text Lengths: '32, 64, 128, 256'. * *Label 0*: Refers to the unseen data during pretraining. *Label 1*: Refers to the seen data. ## ️ Codebase For evaluating MIA methods on our datasets, visit our GitHub repository. ## ⭐ Citing our Work If you find our codebase and datasets beneficial, kindly cite our work:
[ "# WikiMIA Datasets\n\nThe WikiMIA datasets serve as a benchmark designed to evaluate membership inference attack (MIA) methods, specifically in detecting pretraining data from extensive large language models.", "### Applicability\n\nThe datasets can be applied to various models released between 2017 to 2023:\n\n- LLaMA1/2\n- GPT-Neo\n- OPT\n- Pythia\n- text-davinci-001\n- text-davinci-002\n- ... and more.", "## Loading the datasets\n\nTo load the dataset:\n\n\n* Available Text Lengths: '32, 64, 128, 256'.\n* *Label 0*: Refers to the unseen data during pretraining. *Label 1*: Refers to the seen data.", "## ️ Codebase\n\nFor evaluating MIA methods on our datasets, visit our GitHub repository.", "## ⭐ Citing our Work\n\nIf you find our codebase and datasets beneficial, kindly cite our work:" ]
[ "TAGS\n#size_categories-1K<n<10K #language-English #license-mit #arxiv-2310.16789 #region-us \n", "# WikiMIA Datasets\n\nThe WikiMIA datasets serve as a benchmark designed to evaluate membership inference attack (MIA) methods, specifically in detecting pretraining data from extensive large language models.", "### Applicability\n\nThe datasets can be applied to various models released between 2017 to 2023:\n\n- LLaMA1/2\n- GPT-Neo\n- OPT\n- Pythia\n- text-davinci-001\n- text-davinci-002\n- ... and more.", "## Loading the datasets\n\nTo load the dataset:\n\n\n* Available Text Lengths: '32, 64, 128, 256'.\n* *Label 0*: Refers to the unseen data during pretraining. *Label 1*: Refers to the seen data.", "## ️ Codebase\n\nFor evaluating MIA methods on our datasets, visit our GitHub repository.", "## ⭐ Citing our Work\n\nIf you find our codebase and datasets beneficial, kindly cite our work:" ]
[ 36, 44, 58, 60, 26, 26 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #license-mit #arxiv-2310.16789 #region-us \n# WikiMIA Datasets\n\nThe WikiMIA datasets serve as a benchmark designed to evaluate membership inference attack (MIA) methods, specifically in detecting pretraining data from extensive large language models.### Applicability\n\nThe datasets can be applied to various models released between 2017 to 2023:\n\n- LLaMA1/2\n- GPT-Neo\n- OPT\n- Pythia\n- text-davinci-001\n- text-davinci-002\n- ... and more.## Loading the datasets\n\nTo load the dataset:\n\n\n* Available Text Lengths: '32, 64, 128, 256'.\n* *Label 0*: Refers to the unseen data during pretraining. *Label 1*: Refers to the seen data.## ️ Codebase\n\nFor evaluating MIA methods on our datasets, visit our GitHub repository.## ⭐ Citing our Work\n\nIf you find our codebase and datasets beneficial, kindly cite our work:" ]
06e20be9a34b9f18844965cb06dcfef6c796c606
# Dataset Card for "rotten_tomatoes_affix_pos" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/rotten_tomatoes_affix_pos
[ "region:us" ]
2023-10-05T22:37:39+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 12606, "num_examples": 73}], "download_size": 10849, "dataset_size": 12606}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-23T01:29:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rotten_tomatoes_affix_pos" More Information needed
[ "# Dataset Card for \"rotten_tomatoes_affix_pos\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rotten_tomatoes_affix_pos\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rotten_tomatoes_affix_pos\"\n\nMore Information needed" ]
3e75fd3444b3f4c22a99781cf6a7d956d57ae56d
<img src="https://cdn-uploads.huggingface.co/production/uploads/6434a6e8ea46c009904c617e/J_4FHXmtM6TuRnN3aL06y.png" width="38" height="38"> This is the training dataset of **MUFFIN** (**Mu**lti-**F**aceted **In**structions). Please refer to our project website for more details: [Website](https://renzelou.github.io/Muffin/) ## JSON Format The download data can be read as a Python list. In this list, each elemental Python dictionary has one input text. This input text has multiple task instructions and the corresponding outputs. ```json [ { "input": "XXX", "instances": [ { "instruction": "III", "output": "YYY" }, { "instruction": "III", "output": "YYY" } ] } , { "input": "XXX", "instances": [ { "instruction": "III", "output": "YYY" } ] } ] ``` ## Data Statistics There are a total of 1,463 input texts, where each input is equipped with multiple task instructions (~46.48 instructions per input), resulting in **68,014** training instances in total. The detailed statistics are shown below: <div style="text-align:center"><img src="https://cdn-uploads.huggingface.co/production/uploads/6434a6e8ea46c009904c617e/hcQjRr1TqX08C4tMnEQaZ.png" alt="statistics.png" width="500"/></div> ## 🥳 Citation Please kindly cite our paper if you use our dataset: ```bibtex @inproceedings{Lou2023MUFFIN, title={{MUFFIN}: Curating Multi-Faceted Instructions for Improving Instruction Following}, author={Renze Lou and Kai Zhang and Jian Xie and Yuxuan Sun and Janice Ahn and Hanzi Xu and Yu su and Wenpeng Yin}, booktitle={The Twelfth International Conference on Learning Representations}, year={2024}, url={https://openreview.net/forum?id=1vrS1zwekw} } ```
Reza8848/MUFFIN_68k
[ "size_categories:10K<n<100K", "language:en", "region:us" ]
2023-10-05T22:41:51+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"]}
2024-01-24T22:08:55+00:00
[]
[ "en" ]
TAGS #size_categories-10K<n<100K #language-English #region-us
<img src="URL width="38" height="38"> This is the training dataset of MUFFIN (Multi-Faceted Instructions). Please refer to our project website for more details: Website ## JSON Format The download data can be read as a Python list. In this list, each elemental Python dictionary has one input text. This input text has multiple task instructions and the corresponding outputs. ## Data Statistics There are a total of 1,463 input texts, where each input is equipped with multiple task instructions (~46.48 instructions per input), resulting in 68,014 training instances in total. The detailed statistics are shown below: <div style="text-align:center"><img src="URL alt="URL" width="500"/></div> ## Citation Please kindly cite our paper if you use our dataset:
[ "## JSON Format\n\nThe download data can be read as a Python list.\n\nIn this list, each elemental Python dictionary has one input text.\n\nThis input text has multiple task instructions and the corresponding outputs.", "## Data Statistics\n\nThere are a total of 1,463 input texts, where each input is equipped with multiple task instructions (~46.48 instructions per input), resulting in 68,014 training instances in total. \n\nThe detailed statistics are shown below:\n\n<div style=\"text-align:center\"><img src=\"URL alt=\"URL\" width=\"500\"/></div>", "## Citation\n\nPlease kindly cite our paper if you use our dataset:" ]
[ "TAGS\n#size_categories-10K<n<100K #language-English #region-us \n", "## JSON Format\n\nThe download data can be read as a Python list.\n\nIn this list, each elemental Python dictionary has one input text.\n\nThis input text has multiple task instructions and the corresponding outputs.", "## Data Statistics\n\nThere are a total of 1,463 input texts, where each input is equipped with multiple task instructions (~46.48 instructions per input), resulting in 68,014 training instances in total. \n\nThe detailed statistics are shown below:\n\n<div style=\"text-align:center\"><img src=\"URL alt=\"URL\" width=\"500\"/></div>", "## Citation\n\nPlease kindly cite our paper if you use our dataset:" ]
[ 22, 45, 84, 16 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #language-English #region-us \n## JSON Format\n\nThe download data can be read as a Python list.\n\nIn this list, each elemental Python dictionary has one input text.\n\nThis input text has multiple task instructions and the corresponding outputs.## Data Statistics\n\nThere are a total of 1,463 input texts, where each input is equipped with multiple task instructions (~46.48 instructions per input), resulting in 68,014 training instances in total. \n\nThe detailed statistics are shown below:\n\n<div style=\"text-align:center\"><img src=\"URL alt=\"URL\" width=\"500\"/></div>## Citation\n\nPlease kindly cite our paper if you use our dataset:" ]
6dd7ff45c2c2aaab1a27fa084a1d32dd4e539e8b
# Dataset Card for "tweet_eval_affix_pos" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/tweet_eval_affix_pos
[ "region:us" ]
2023-10-05T22:43:59+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "neutral", "2": "positive"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 76536, "num_examples": 609}], "download_size": 49267, "dataset_size": 76536}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-23T01:30:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tweet_eval_affix_pos" More Information needed
[ "# Dataset Card for \"tweet_eval_affix_pos\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tweet_eval_affix_pos\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tweet_eval_affix_pos\"\n\nMore Information needed" ]
d3b0d0de643bcc89ce14c5bd71c2f9097ea2fa36
# Dataset Card for "retrieval-augment-finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
approach0/retrieval-augment-finetune.old
[ "region:us" ]
2023-10-05T22:44:15+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "note", "dtype": "string"}, {"name": "problem_id", "dtype": "string"}, {"name": "problem", "dtype": "string"}, {"name": "solution", "dtype": "string"}, {"name": "aug_query", "dtype": "string"}, {"name": "aug_result", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "correct", "dtype": "bool"}, {"name": "relevance", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5910768, "num_examples": 2649}], "download_size": 0, "dataset_size": 5910768}}
2023-10-12T22:22:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "retrieval-augment-finetune" More Information needed
[ "# Dataset Card for \"retrieval-augment-finetune\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"retrieval-augment-finetune\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"retrieval-augment-finetune\"\n\nMore Information needed" ]
3112a5908d916f84cf9b56d99cd8877d3227777d
# Dataset Card for "three_styles_10rand" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kewu93/three_styles_10rand
[ "region:us" ]
2023-10-05T22:47:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 321384.41333333333, "num_examples": 10}, {"name": "val", "num_bytes": 2935082.1333333333, "num_examples": 100}], "download_size": 3157886, "dataset_size": 3256466.546666667}}
2023-10-05T22:47:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "three_styles_10rand" More Information needed
[ "# Dataset Card for \"three_styles_10rand\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"three_styles_10rand\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"three_styles_10rand\"\n\nMore Information needed" ]
52bf6991a04e028580aaefbd797860a0248302c9
# Dataset Card for "53f478ab" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/53f478ab
[ "region:us" ]
2023-10-05T23:13:19+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 257, "num_examples": 10}], "download_size": 1433, "dataset_size": 257}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T23:13:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "53f478ab" More Information needed
[ "# Dataset Card for \"53f478ab\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"53f478ab\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"53f478ab\"\n\nMore Information needed" ]
fad8b2e7770ebd4b48259c0f801d1986c5a3cd5e
# Dataset Card for "jigsaw-unintended-bias-train-es" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-unintended-bias-train-es
[ "region:us" ]
2023-10-05T23:14:32+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 651762756, "num_examples": 1900137}], "download_size": 421521895, "dataset_size": 651762756}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T23:14:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-unintended-bias-train-es" More Information needed
[ "# Dataset Card for \"jigsaw-unintended-bias-train-es\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-unintended-bias-train-es\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-unintended-bias-train-es\"\n\nMore Information needed" ]
38971d1b96e02240ad59fae28319c25e2faf9770
# Dataset Card for "jigsaw-unintended-bias-train-fr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-unintended-bias-train-fr
[ "region:us" ]
2023-10-05T23:16:11+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 688756878, "num_examples": 1900136}], "download_size": 439186843, "dataset_size": 688756878}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-05T23:16:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-unintended-bias-train-fr" More Information needed
[ "# Dataset Card for \"jigsaw-unintended-bias-train-fr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-unintended-bias-train-fr\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-unintended-bias-train-fr\"\n\nMore Information needed" ]
c517413ba39d78b6e9c872687f27e036f488b9a7
# Dataset Card for "ncbi_genbank_part_6" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_6
[ "region:us" ]
2023-10-05T23:16:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 20271283259, "num_examples": 83750}], "download_size": 9364736791, "dataset_size": 20271283259}}
2023-10-05T23:25:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_6" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_6\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_6\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_6\"\n\nMore Information needed" ]
33bf8a2fdb11f9a9cebcef7e395480a089e494c2
# Dataset Card for "sketch_100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kewu93/sketch_100
[ "region:us" ]
2023-10-05T23:23:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1431746.0, "num_examples": 100}, {"name": "val", "num_bytes": 1431746.0, "num_examples": 100}], "download_size": 2858841, "dataset_size": 2863492.0}}
2023-10-05T23:24:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sketch_100" More Information needed
[ "# Dataset Card for \"sketch_100\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sketch_100\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sketch_100\"\n\nMore Information needed" ]
ce3459ab809a74024a42ca90486d32a218e3cc12
# Dataset Card for "yugioh-crystal-beast-ready" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mharvill23/yugioh-crystal-beast-ready
[ "region:us" ]
2023-10-06T00:09:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 845968.0, "num_examples": 15}], "download_size": 847374, "dataset_size": 845968.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T00:09:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "yugioh-crystal-beast-ready" More Information needed
[ "# Dataset Card for \"yugioh-crystal-beast-ready\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"yugioh-crystal-beast-ready\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"yugioh-crystal-beast-ready\"\n\nMore Information needed" ]
619e657f7e8eda0ae4c6a19824cfd0d8479c9fd2
# AutoTrain Dataset for project: test13 ## Dataset Description This dataset has been processed for project BDA594-fake-news-classification model. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "MOSCOW\u2014Warning President Volodymyr Zelensky think carefully repercussions changing country\u2019s college football conference alignment, Russian president Vladimir Putin reportedly vowed retaliation Friday Ukraine ever become member Big Ten. \u201cThere absolutely reason Ukraine needs join Big Ten outside provoking Russia, move apply membership met harshest consequences,\u201d said Putin, pointing Ukraine\u2019s position neutral firewall countries long served geographical buffer Russia Big Ten members like Indiana University Rutgers. \u201cAny move even increase number games Ukraine Big Ten schools considered act aggression Russian state met proportionate force. universities stoking hostility Russia decades, caution Ukraine indulge thirst power. Frankly, Zelensky\u2019s good. know University Michigan cofounded Big Ten years ago vehicle nefarious interests NCAA Division sports, urge Ukraine become puppet [University Michigan athletic director] Alan Haller prolonged proxy season Big Ten schools Russia.\u201d press time, tensions rising Russia pulled ambassador Penn State. Watch Biden Asks Americans Come Sit Keep Company End CC Share Subtitles English Share Video Facebook Twitter Email Reddit Link view video Biden Asks Americans Come Sit Keep Company End Week's Viral News: September 22, 2023 Friday 2:58PM Study Finds LSD Highly Effective Ruining Nephew\u2019s Baptism Thursday 10:44AM", "target": 0 }, { "text": "Coast Guard searching 35yearold man fell overboard Carnival cruise ship near Florida Monday, according statement . man passenger Carnival Magic cruise ship, 186 miles east Jacksonville fell water. Security footage shows leaned railing room balcony fell water around 4:10 a.m. Monday, cruise corporation said statement. reported missing companion late Monday afternoon. Officials released identifying information man age. cruise ship released Coast Guard search rescue efforts continue way Norfolk, Virginia, scheduled arrive Tuesday. \"The Carnival Care Team providing support guest\u2019s companion traveling party board,\" Carnival said. Coast Guard using air water assets search passenger.", "target": 1 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "ClassLabel(names=['fake', 'real'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 508 | | valid | 128 |
Kaludi/BDA594-fake-news-classification
[ "task_categories:text-classification", "region:us" ]
2023-10-06T00:25:31+00:00
{"task_categories": ["text-classification"]}
2023-10-06T00:59:55+00:00
[]
[]
TAGS #task_categories-text-classification #region-us
AutoTrain Dataset for project: test13 ===================================== Dataset Description ------------------- This dataset has been processed for project BDA594-fake-news-classification model. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-text-classification #region-us \n", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ 17, 17, 23, 27 ]
[ "passage: TAGS\n#task_categories-text-classification #region-us \n### Data Instances\n\n\nA sample from this dataset looks as follows:### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
861d3d2d5ffa6f9c61171b28b11d9e01d05d4fdd
# Dataset Card for "jigsaw-multilingual-train-unique" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/jigsaw-multilingual-train-unique
[ "region:us" ]
2023-10-06T00:48:04+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 24247216, "num_examples": 60299}], "download_size": 15570943, "dataset_size": 24247216}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T00:48:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jigsaw-multilingual-train-unique" More Information needed
[ "# Dataset Card for \"jigsaw-multilingual-train-unique\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jigsaw-multilingual-train-unique\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"jigsaw-multilingual-train-unique\"\n\nMore Information needed" ]
7151ccd69d0561060449471c143ac6e4291e5d51
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
daytoy-models/coronary-artery
[ "language:no", "language:aa", "license:bigcode-openrail-m", "code", "region:us" ]
2023-10-06T01:11:55+00:00
{"language": ["no", "aa"], "license": "bigcode-openrail-m", "pipeline_tag": "text-generation", "inference": true, "widget": [{"text": "def print_hello_world():", "example_title": "Hello world", "group": "Python"}], "datasets": ["bigcode/the-stack-dedup"], "metrics": ["code_eval"], "library_name": "transformers", "tags": ["code"], "model-index": [{"name": "StarCoder", "results": [{"task": {"type": "text-generation"}, "dataset": {"type": "openai_humaneval", "name": "HumanEval (Prompted)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.408, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "openai_humaneval", "name": "HumanEval"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.336, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "mbpp", "name": "MBPP"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.527, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "ds1000", "name": "DS-1000 (Overall Completion)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.26, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (C++)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.3155, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (C#)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2101, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (D)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.1357, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Go)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.1761, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Java)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.3022, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Julia)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2302, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (JavaScript)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.3079, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Lua)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2389, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (PHP)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2608, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Perl)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.1734, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Python)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.3357, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (R)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.155, "verified": false}]}, 
{"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Ruby)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.0124, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Racket)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.0007, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Rust)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2184, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Scala)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2761, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Bash)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.1046, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (Swift)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.2274, "verified": false}]}, {"task": {"type": "text-generation"}, "dataset": {"type": "nuprl/MultiPL-E", "name": "MultiPL-HumanEval (TypeScript)"}, "metrics": [{"name": "pass@1", "type": "pass@1", "value": 0.3229, "verified": false}]}]}], "extra_gated_prompt": "## Model License Agreement Please read the BigCode [OpenRAIL-M license](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) agreement before accepting it.\n ", "extra_gated_fields": {"I accept the above license agreement, and will use the Model complying with the set of use restrictions and sharing requirements": "checkbox"}}
2024-01-22T08:40:17+00:00
[]
[ "no", "aa" ]
TAGS #language-Norwegian #language-Afar #license-bigcode-openrail-m #code #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#language-Norwegian #language-Afar #license-bigcode-openrail-m #code #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 30, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#language-Norwegian #language-Afar #license-bigcode-openrail-m #code #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
6be90e13349443376b26f008f786c5229ece350c
# Dataset Card for Wikipedia - Portuguese ## Dataset Description - latest - 20231001 ## Usage ```python from datasets import load_dataset dataset = load_dataset('pablo-moreira/wikipedia-pt', 'latest') #dataset = load_dataset('pablo-moreira/wikipedia-pt', '20231001') ``` ## Extractor Notebook with the code for extracting documents from the Wikipedia dump based on the code from the FastAI NLP introduction course. [Notebook](extractor.ipynb) ## Links - **[Wikipedia dumps](https://dumps.wikimedia.org/)** - **[A Code-First Intro to Natural Language Processing](https://github.com/fastai/course-nlp)** - **[Extractor Code](https://github.com/fastai/course-nlp/blob/master/nlputils.py)**
pablo-moreira/wikipedia-pt
[ "region:us" ]
2023-10-06T01:29:43+00:00
{"dataset_info": [{"config_name": "20231001", "features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2150584347, "num_examples": 1857355}], "download_size": 0, "dataset_size": 2150584347}, {"config_name": "latest", "features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2150584347, "num_examples": 1857355}], "download_size": 0, "dataset_size": 2150584347}], "configs": [{"config_name": "20231001", "data_files": [{"split": "train", "path": "20231001/train-*"}]}, {"config_name": "latest", "data_files": [{"split": "train", "path": "latest/train-*"}]}]}
2023-10-06T12:52:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for Wikipedia - Portuguese ## Dataset Description - latest - 20231001 ## Usage ## Extractor Notebook with the code for extracting documents from the Wikipedia dump based on the code from the FastAI NLP introduction course. Notebook ## Links - Wikipedia dumps - A Code-First Intro to Natural Language Processing - Extractor Code
[ "# Dataset Card for Wikipedia - Portuguese", "## Dataset Description\n\n- latest\n- 20231001", "## Usage", "## Extractor\n\nNotebook with the code for extracting documents from the Wikipedia dump based on the code from the FastAI NLP introduction course.\n\nNotebook", "## Links\n\n- Wikipedia dumps\n- A Code-First Intro to Natural Language Processing\n- Extractor Code" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Wikipedia - Portuguese", "## Dataset Description\n\n- latest\n- 20231001", "## Usage", "## Extractor\n\nNotebook with the code for extracting documents from the Wikipedia dump based on the code from the FastAI NLP introduction course.\n\nNotebook", "## Links\n\n- Wikipedia dumps\n- A Code-First Intro to Natural Language Processing\n- Extractor Code" ]
[ 6, 10, 10, 3, 31, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Wikipedia - Portuguese## Dataset Description\n\n- latest\n- 20231001## Usage## Extractor\n\nNotebook with the code for extracting documents from the Wikipedia dump based on the code from the FastAI NLP introduction course.\n\nNotebook## Links\n\n- Wikipedia dumps\n- A Code-First Intro to Natural Language Processing\n- Extractor Code" ]
b2b739b496c214611acc099a296d39eb1a3fd519
All commonsense causal data is in COPES.json. The data is recorded in the format of JSON records with three fields: 1. story: the story of 5 sentences sampled from RocStories. 2. cause_idx: the events that have a causal relation with the last event (indices start from 0). 3. res_idx: the result, just the last event. The split_idx.json file provides the indices for the validation and testing data, indexed from 0.
ZhaoweiWang/COPES
[ "region:us" ]
2023-10-06T02:02:06+00:00
{}
2023-10-06T02:02:33+00:00
[]
[]
TAGS #region-us
All commonsense causal data is in URL. The data is recorded in the format of JSON records with three fields: 1. story: the story of 5 sentences sampled from RocStories. 2. cause_idx: the events that have a causal relation with the last event (indices start from 0). 3. res_idx: the result, just the last event. The split_idx.json file provides the indices for the validation and testing data, indexed from 0.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
b46749425493d681f393986af16d75558af60650
# Dataset Card for "7e27d622" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/7e27d622
[ "region:us" ]
2023-10-06T02:10:36+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 232, "num_examples": 10}], "download_size": 1424, "dataset_size": 232}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T02:10:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "7e27d622" More Information needed
[ "# Dataset Card for \"7e27d622\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"7e27d622\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"7e27d622\"\n\nMore Information needed" ]
060b6cd424586e3cb62dc97ee8d377d78de41d5e
# Dataset Card for "diffusion.2.textual_inversion" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayan1988/diffusion.2.textual_inversion
[ "region:us" ]
2023-10-06T02:47:01+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1740639.0, "num_examples": 6}], "download_size": 0, "dataset_size": 1740639.0}}
2023-10-06T05:51:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "diffusion.2.textual_inversion" More Information needed
[ "# Dataset Card for \"diffusion.2.textual_inversion\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"diffusion.2.textual_inversion\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"diffusion.2.textual_inversion\"\n\nMore Information needed" ]
6bb64d9be9fb3ea15fbe459ba8fc347906c1347c
# Bangumi Image Base of Maoujou De Oyasumi This is the image base of bangumi Maoujou de Oyasumi, we detected 21 characters, 1076 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 9 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 12 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 17 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 22 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 195 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 21 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 46 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 8 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 36 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 9 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 72 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 69 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 15 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 15 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 10 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 10 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 12 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 396 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 21 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 21 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | noise | 60 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | 
![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/maoujoudeoyasumi
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T03:14:03+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T04:15:32+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Maoujou De Oyasumi ======================================== This is the image base of bangumi Maoujou de Oyasumi, we detected 21 characters, 1076 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain some noisy samples. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
48d49b52d03fc4c5c99c29073df7e98207d83401
# Bangumi Image Base of Punch Line This is the image base of bangumi Punch Line, we detected 17 characters, 1203 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 104 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 12 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 134 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 47 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 135 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 16 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 11 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 14 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 14 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 21 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 18 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 40 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 150 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 324 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 14 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 9 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | noise | 140 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/punchline
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T03:14:19+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T04:17:13+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Punch Line ================================ This is the image base of bangumi Punch Line, we detected 17 characters, 1203 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain some noisy samples. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
ca99c4641ee218d9964ac9a3a20e77b6abb12e4d
# Dataset Card for "affixal_negation_nonce" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/affixal_negation_nonce
[ "region:us" ]
2023-10-06T03:20:09+00:00
{"dataset_info": {"features": [{"name": "word", "dtype": "string"}, {"name": "affix", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 25284.0, "num_examples": 880}], "download_size": 9528, "dataset_size": 25284.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-28T04:17:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "affixal_negation_nonce" More Information needed
[ "# Dataset Card for \"affixal_negation_nonce\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"affixal_negation_nonce\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"affixal_negation_nonce\"\n\nMore Information needed" ]
65bfe92d7d3d75e0517e99b6bc9f375c05c50570
# Bangumi Image Base of Suzumiya Haruhi No Yuuutsu This is the image base of bangumi Suzumiya Haruhi no Yuuutsu, we detected 22 characters, 4994 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1639 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 563 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 606 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 72 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 27 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 103 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 796 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 23 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 22 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 
6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 453 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 124 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 67 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 19 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 49 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 13 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 34 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 48 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 12 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 44 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 57 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 5 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 
3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | N/A | N/A | N/A | | noise | 218 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/suzumiyaharuhinoyuuutsu
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T03:31:09+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T06:11:00+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Suzumiya Haruhi No Yuuutsu ================================================ This is the image base of bangumi Suzumiya Haruhi no Yuuutsu, we detected 22 characters, 4994 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain some noisy samples. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
fa706a45034ef727894d1eb6351339802f691891
# Bangumi Image Base of Fire Force This is the image base of bangumi Fire Force, we detected 60 characters, 5217 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1278 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 231 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 55 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 65 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 89 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 30 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 73 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 140 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 47 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 
7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 56 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 264 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 25 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 41 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 173 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 73 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 35 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 23 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 70 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 23 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 26 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 57 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 
4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 34 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 156 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 29 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 218 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 34 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 67 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 20 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 42 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 33 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 69 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 34 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 41 | [Download](32/dataset.zip) | 
![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 177 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 36 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 299 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 52 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 135 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 26 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 26 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 5 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | N/A | N/A | N/A | | 41 | 25 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 15 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 12 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 22 | 
[Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 17 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 85 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 12 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 85 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 33 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 37 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 17 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 122 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 25 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 60 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 13 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | 
![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 6 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | N/A | N/A | | 57 | 5 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | N/A | N/A | N/A | | 58 | 10 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | noise | 209 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/fireforce
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T03:46:39+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T07:11:49+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Fire Force ================================ This is the image base of bangumi Fire Force, we detected 60 characters, 5217 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain some noisy samples. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
40daed634ab7148f22e357417ef781cbbbf581e8
# Bangumi Image Base of Zetsuen No Tempest This is the image base of bangumi Zetsuen no Tempest, we detected 16 characters, 2070 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 75 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 405 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 19 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 435 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 15 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 35 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 40 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 124 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 10 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 28 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 402 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 88 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 55 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 40 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 175 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | noise | 124 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/zetsuennotempest
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T03:46:59+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T05:18:35+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Zetsuen No Tempest ======================================== This is the image base of bangumi Zetsuen no Tempest, we detected 16 characters, 2070 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned; they may still contain some noisy samples. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
4460c25c197a989b93719972c3109aede0673167
# Dataset Card for "AnikaBasu-CyberbullyingDataset-es" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/AnikaBasu-CyberbullyingDataset-es
[ "region:us" ]
2023-10-06T04:35:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "processed_text", "sequence": "string"}, {"name": "text_es", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1407598, "num_examples": 2955}], "download_size": 0, "dataset_size": 1407598}}
2023-10-06T18:33:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AnikaBasu-CyberbullyingDataset-es" More Information needed
[ "# Dataset Card for \"AnikaBasu-CyberbullyingDataset-es\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AnikaBasu-CyberbullyingDataset-es\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"AnikaBasu-CyberbullyingDataset-es\"\n\nMore Information needed" ]
c4cec43b8d2d18e3d303ab4b5cc7d1a029e8ccc2
# Bangumi Image Base of Violet Evergarden This is the image base of bangumi Violet Evergarden, we detected 72 characters, 4728 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 32 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 166 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 16 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 23 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 160 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 66 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 17 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 13 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 23 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | 
![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 15 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 38 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 30 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 31 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 23 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 15 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 34 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 104 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 23 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 103 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 75 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 32 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | 
![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 27 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 21 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 25 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 19 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 13 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 22 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 180 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 39 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 16 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 156 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 21 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 70 | 
[Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 23 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 180 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 12 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 39 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 58 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 108 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 27 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 38 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 32 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 335 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 84 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | 
![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 16 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 94 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 19 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 67 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 20 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 79 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 17 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 9 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 6 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | N/A | N/A | | 53 | 34 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 12 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 31 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 
4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 33 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 17 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 24 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 1282 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 20 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 10 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 8 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 20 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 12 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 14 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 80 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 8](66/preview_8.png) | | 67 | 9 | [Download](67/dataset.zip) | 
![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 14 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) | | 69 | 10 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 9 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) | | noise | 178 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/violetevergarden
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-06T04:43:11+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-06T07:00:10+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Violet Evergarden ======================================= This is the image base of bangumi Violet Evergarden; we detected 72 characters and 4728 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% clean; they may in fact be noisy. If you intend to manually train models using this dataset, we recommend performing the necessary preprocessing on the downloaded dataset to eliminate potentially noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
c0ac453dd236f85c1e92a98eaec2aeba6b809aa4
# Dataset Card for "DONOTUSEDATA" Studying the effects of harmful data on LLMs. Side A. Filtered Subset of [kjj0/4chanpol-openai](https://huggingface.co/datasets/kjj0/4chanpol-openaimod) [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BirdL/DONOTUSEDATA-SideA
[ "not-for-all-audiences", "region:us" ]
2023-10-06T04:56:53+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sexual", "dtype": "float64"}, {"name": "hate", "dtype": "float64"}, {"name": "violence", "dtype": "float64"}, {"name": "self-harm", "dtype": "float64"}, {"name": "sexual/minors", "dtype": "float64"}, {"name": "hate/threatening", "dtype": "float64"}, {"name": "violence/graphic", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 8256999, "num_examples": 30002}], "download_size": 6382984, "dataset_size": 8256999}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["not-for-all-audiences"]}
2023-10-07T20:59:31+00:00
[]
[]
TAGS #not-for-all-audiences #region-us
# Dataset Card for "DONOTUSEDATA" Studying the effects of harmful data on LLMs. Side A. Filtered Subset of kjj0/4chanpol-openai More Information needed
[ "# Dataset Card for \"DONOTUSEDATA\"\n\nStudying the effects of harmful data on LLMs. Side A.\nFiltered Subset of kjj0/4chanpol-openai\n\nMore Information needed" ]
[ "TAGS\n#not-for-all-audiences #region-us \n", "# Dataset Card for \"DONOTUSEDATA\"\n\nStudying the effects of harmful data on LLMs. Side A.\nFiltered Subset of kjj0/4chanpol-openai\n\nMore Information needed" ]
[ 15, 46 ]
[ "passage: TAGS\n#not-for-all-audiences #region-us \n# Dataset Card for \"DONOTUSEDATA\"\n\nStudying the effects of harmful data on LLMs. Side A.\nFiltered Subset of kjj0/4chanpol-openai\n\nMore Information needed" ]
4ca4d4e9711febb9a096790bca01d7f50370c6ba
# Dataset Card for "massive_5_lang_DA_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/massive_5_lang_DA_tokenized
[ "region:us" ]
2023-10-06T04:59:49+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 424287645, "num_examples": 552890}], "download_size": 127805722, "dataset_size": 424287645}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T05:00:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "massive_5_lang_DA_tokenized" More Information needed
[ "# Dataset Card for \"massive_5_lang_DA_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"massive_5_lang_DA_tokenized\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"massive_5_lang_DA_tokenized\"\n\nMore Information needed" ]
85cca30caf0e140e40133bc7810eb79d2a048e6e
# Dataset Card for "Dataset_for_phi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SniiKz/Dataset_for_phi
[ "region:us" ]
2023-10-06T05:06:55+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 830921, "num_examples": 2645}], "download_size": 197574, "dataset_size": 830921}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T05:07:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Dataset_for_phi" More Information needed
[ "# Dataset Card for \"Dataset_for_phi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Dataset_for_phi\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Dataset_for_phi\"\n\nMore Information needed" ]
de79af47e89f6fbf91e15ac6171d687ba3a53868
# Dataset Card for "DONOTUSEDATA-SideB" Studying the effects of harmful data on LLMs. Side B. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BirdL/DONOTUSEDATA-SideB
[ "not-for-all-audiences", "region:us" ]
2023-10-06T05:14:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sexual", "dtype": "float64"}, {"name": "hate", "dtype": "float64"}, {"name": "violence", "dtype": "float64"}, {"name": "self-harm", "dtype": "float64"}, {"name": "sexual/minors", "dtype": "float64"}, {"name": "hate/threatening", "dtype": "float64"}, {"name": "violence/graphic", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 6855523, "num_examples": 30002}], "download_size": 5665789, "dataset_size": 6855523}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["not-for-all-audiences"]}
2023-10-07T20:46:48+00:00
[]
[]
TAGS #not-for-all-audiences #region-us
# Dataset Card for "DONOTUSEDATA-SideB" Studying the effects of harmful data on LLMs. Side B. More Information needed
[ "# Dataset Card for \"DONOTUSEDATA-SideB\"\n\nStudying the effects of harmful data on LLMs. Side B.\n\nMore Information needed" ]
[ "TAGS\n#not-for-all-audiences #region-us \n", "# Dataset Card for \"DONOTUSEDATA-SideB\"\n\nStudying the effects of harmful data on LLMs. Side B.\n\nMore Information needed" ]
[ 15, 36 ]
[ "passage: TAGS\n#not-for-all-audiences #region-us \n# Dataset Card for \"DONOTUSEDATA-SideB\"\n\nStudying the effects of harmful data on LLMs. Side B.\n\nMore Information needed" ]
b402e411f9e1c040b292f8e5b5e1a7814c32c7f6
# Dataset Card for "massive_10_lang_DA_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/massive_10_lang_DA_tokenized
[ "region:us" ]
2023-10-06T05:17:10+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 780462405, "num_examples": 1011790}], "download_size": 248643037, "dataset_size": 780462405}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T05:17:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "massive_10_lang_DA_tokenized" More Information needed
[ "# Dataset Card for \"massive_10_lang_DA_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"massive_10_lang_DA_tokenized\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"massive_10_lang_DA_tokenized\"\n\nMore Information needed" ]
6a6b8b65947917196a8472bab4918218039b2fef
# Dataset Card for "fantasy_animal_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/fantasy_animal_prompts
[ "region:us" ]
2023-10-06T05:20:18+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2645706, "num_examples": 10000}], "download_size": 335130, "dataset_size": 2645706}}
2023-10-06T05:41:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fantasy_animal_prompts" More Information needed
[ "# Dataset Card for \"fantasy_animal_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fantasy_animal_prompts\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fantasy_animal_prompts\"\n\nMore Information needed" ]
13d060016fe412434b0524c6fc1e81e1b01c9d87
# DopeorNope/Eng_Kor_COT_combined **(주)미디어그룹사람과숲과 (주)마커의 LLM 연구 컨소시엄에서 개발된 모델입니다** **The license is `cc-by-nc-sa-4.0`.** - KOpen-platypus + DopeorNope/2000sample_COT - 데이터셋 이용하셔서 모델이나 데이터셋을 만드실 때, 간단한 출처 표기를 해주신다면 연구에 큰 도움이 됩니다😭😭 - 고품질 한국어 데이터셋 + COT 영문 데이터셋을 합친 영한 데이터셋으로 기존 Lamma2의 베이스인 english를 통한 추론을 기대하며 개발한 데이터셋입니다. - COT 데이터는 카이스트의 데이터를 Sampling하여 다양한 Knoledge를 함양할수있도록 데이터를 Sampling 하였습니다. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DopeorNope/Eng_Kor_COT_combined
[ "license:cc-by-nc-sa-4.0", "region:us" ]
2023-10-06T05:37:55+00:00
{"license": "cc-by-nc-sa-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36071886, "num_examples": 27085}], "download_size": 19831176, "dataset_size": 36071886}}
2023-10-19T14:41:51+00:00
[]
[]
TAGS #license-cc-by-nc-sa-4.0 #region-us
# DopeorNope/Eng_Kor_COT_combined (주)미디어그룹사람과숲과 (주)마커의 LLM 연구 컨소시엄에서 개발된 모델입니다 The license is 'cc-by-nc-sa-4.0'. - KOpen-platypus + DopeorNope/2000sample_COT - 데이터셋 이용하셔서 모델이나 데이터셋을 만드실 때, 간단한 출처 표기를 해주신다면 연구에 큰 도움이 됩니다 - 고품질 한국어 데이터셋 + COT 영문 데이터셋을 합친 영한 데이터셋으로 기존 Lamma2의 베이스인 english를 통한 추론을 기대하며 개발한 데이터셋입니다. - COT 데이터는 카이스트의 데이터를 Sampling하여 다양한 Knoledge를 함양할수있도록 데이터를 Sampling 하였습니다. More Information needed
[ "# DopeorNope/Eng_Kor_COT_combined\n\n(주)미디어그룹사람과숲과 (주)마커의 LLM 연구 컨소시엄에서 개발된 모델입니다 \nThe license is 'cc-by-nc-sa-4.0'. \n\n- KOpen-platypus + DopeorNope/2000sample_COT\n\n- 데이터셋 이용하셔서 모델이나 데이터셋을 만드실 때, 간단한 출처 표기를 해주신다면 연구에 큰 도움이 됩니다\n\n- 고품질 한국어 데이터셋 + COT 영문 데이터셋을 합친 영한 데이터셋으로 기존 Lamma2의 베이스인 english를 통한 추론을 기대하며 개발한 데이터셋입니다.\n\n- COT 데이터는 카이스트의 데이터를 Sampling하여 다양한 Knoledge를 함양할수있도록 데이터를 Sampling 하였습니다. \n\n\nMore Information needed" ]
[ "TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n", "# DopeorNope/Eng_Kor_COT_combined\n\n(주)미디어그룹사람과숲과 (주)마커의 LLM 연구 컨소시엄에서 개발된 모델입니다 \nThe license is 'cc-by-nc-sa-4.0'. \n\n- KOpen-platypus + DopeorNope/2000sample_COT\n\n- 데이터셋 이용하셔서 모델이나 데이터셋을 만드실 때, 간단한 출처 표기를 해주신다면 연구에 큰 도움이 됩니다\n\n- 고품질 한국어 데이터셋 + COT 영문 데이터셋을 합친 영한 데이터셋으로 기존 Lamma2의 베이스인 english를 통한 추론을 기대하며 개발한 데이터셋입니다.\n\n- COT 데이터는 카이스트의 데이터를 Sampling하여 다양한 Knoledge를 함양할수있도록 데이터를 Sampling 하였습니다. \n\n\nMore Information needed" ]
[ 19, 186 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n# DopeorNope/Eng_Kor_COT_combined\n\n(주)미디어그룹사람과숲과 (주)마커의 LLM 연구 컨소시엄에서 개발된 모델입니다 \nThe license is 'cc-by-nc-sa-4.0'. \n\n- KOpen-platypus + DopeorNope/2000sample_COT\n\n- 데이터셋 이용하셔서 모델이나 데이터셋을 만드실 때, 간단한 출처 표기를 해주신다면 연구에 큰 도움이 됩니다\n\n- 고품질 한국어 데이터셋 + COT 영문 데이터셋을 합친 영한 데이터셋으로 기존 Lamma2의 베이스인 english를 통한 추론을 기대하며 개발한 데이터셋입니다.\n\n- COT 데이터는 카이스트의 데이터를 Sampling하여 다양한 Knoledge를 함양할수있도록 데이터를 Sampling 하였습니다. \n\n\nMore Information needed" ]
5895e305a6230210f4c3b39eddab55db8d8cecbd
There are 11 scenes contained in the dataset, stored in folders 1 to 11. Each scene folder contains several video folders, one per video recorded under that scene. Each video folder contains an RGB folder, a depth folder, a point cloud folder and a ground truth folder. If you want to use this EgoPAT3Dv2 dataset's RGB modality as we do, you need to generate the HDF5 file on your own with the script we provide (it is about 500GB, which is too large for Hugging Face to host): 1. Download all of the scene folders. Extract each video folder from the video zip files in the scene folder using the unzip command and delete all useless files inside. 2. To use the RGB modality, create a new separate folder with the same scene-video-rgb hierarchy. Put the previously extracted RGB folders for each scene and each video into the same place as in the original structure (a sketch of this step follows below). For example, the color folder in ***"1/1.1/color"*** should be put into ***"RGB_file/1/1.1"*** as ***"RGB_file/1/1.1/color"***. 3. Run the script make_RGB_dataset.py. Then you can use the provided RGBDataset tool to load the dataset and create the dataloader.
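As a rough illustration of step 2, here is a minimal Python sketch of the folder reorganization using only the standard library. The root paths `EgoPAT3Dv2_extracted` and `RGB_file` are assumptions to be replaced with your own locations; the HDF5 generation and loading themselves are still done by the dataset's own make_RGB_dataset.py script and RGBDataset tool.

```python
import os
import shutil

# Assumed locations -- adjust to wherever you extracted the scene folders.
SRC_ROOT = "EgoPAT3Dv2_extracted"  # e.g. contains 1/1.1/color, 1/1.2/color, ...
DST_ROOT = "RGB_file"              # new parallel hierarchy holding only the RGB data

for scene in sorted(os.listdir(SRC_ROOT)):
    scene_dir = os.path.join(SRC_ROOT, scene)
    if not os.path.isdir(scene_dir):
        continue
    for video in sorted(os.listdir(scene_dir)):
        color_dir = os.path.join(scene_dir, video, "color")
        if not os.path.isdir(color_dir):
            continue  # skip videos without an extracted RGB ("color") folder
        # Mirror "1/1.1/color" to "RGB_file/1/1.1/color", as described in step 2.
        dst_dir = os.path.join(DST_ROOT, scene, video, "color")
        shutil.copytree(color_dir, dst_dir, dirs_exist_ok=True)  # Python 3.8+

# After this, run the provided make_RGB_dataset.py on RGB_file/ to build the HDF5
# file, then load it with the provided RGBDataset tool to create the dataloader.
```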
ai4ce/EgoPAT3Dv2
[ "language:en", "robotics", "region:us" ]
2023-10-06T06:15:55+00:00
{"language": ["en"], "tags": ["robotics"]}
2023-10-30T05:55:27+00:00
[]
[ "en" ]
TAGS #language-English #robotics #region-us
There are 11 scenes contained in the dataset, stored in folders 1 to 11. Each scene folder contains several video folders, one per video recorded under that scene. Each video folder contains an RGB folder, a depth folder, a point cloud folder and a ground truth folder. If you want to use this EgoPAT3Dv2 dataset's RGB modality as we do, you need to generate the HDF5 file on your own with the script we provide (it is about 500GB, which is too large for Hugging Face to host): 1. Download all of the scene folders. Extract each video folder from the video zip files in the scene folder using the unzip command and delete all useless files inside. 2. To use the RGB modality, create a new separate folder with the same scene-video-rgb hierarchy. Put the previously extracted RGB folders for each scene and each video into the same place as in the original structure. For example, the color folder in *"1/1.1/color"* should be put into *"RGB_file/1/1.1"* as *"RGB_file/1/1.1/color"*. 3. Run the script make_RGB_dataset.py. Then you can use the provided RGBDataset tool to load the dataset and create the dataloader.
[]
[ "TAGS\n#language-English #robotics #region-us \n" ]
[ 13 ]
[ "passage: TAGS\n#language-English #robotics #region-us \n" ]
1222237d6183ee340b469a1636a278a9234ba88a
# Dataset Card for "ali_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/ali_prompts
[ "region:us" ]
2023-10-06T06:36:21+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 367063, "num_examples": 1000}], "download_size": 19378, "dataset_size": 367063}}
2023-10-06T06:36:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ali_prompts" More Information needed
[ "# Dataset Card for \"ali_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ali_prompts\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ali_prompts\"\n\nMore Information needed" ]
4392ac570c56555a952b1ea1e2fc8d990e821e77
# Dataset Card for "COVID-QA-sentence-transformer-biencoder-data-75_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-sentence-transformer-biencoder-data-75_25
[ "region:us" ]
2023-10-06T06:38:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "positive", "dtype": "string"}, {"name": "negative", "dtype": "string"}, {"name": "document_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 25188652, "num_examples": 12274}, {"name": "test", "num_bytes": 2473938, "num_examples": 1360}], "download_size": 1946559, "dataset_size": 27662590}}
2023-10-06T06:38:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-sentence-transformer-biencoder-data-75_25" More Information needed
[ "# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-75_25\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-75_25\"\n\nMore Information needed" ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-75_25\"\n\nMore Information needed" ]
a47059453f923756ee0c284ecced64f1669ba6bc
# Dataset Card for "COVID-QA-testset-biencoder-data-75_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-testset-biencoder-data-75_25
[ "region:us" ]
2023-10-06T06:38:23+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48986357, "num_examples": 513}], "download_size": 8353824, "dataset_size": 48986357}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T06:38:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-testset-biencoder-data-75_25" More Information needed
[ "# Dataset Card for \"COVID-QA-testset-biencoder-data-75_25\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-testset-biencoder-data-75_25\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-testset-biencoder-data-75_25\"\n\nMore Information needed" ]
5a7558e73be0d76a5370d4ff7b2b5e93b0f718c8
# Dataset Card for "COVID-QA-question-answering-biencoder-data-75_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-question-answering-biencoder-data-75_25
[ "region:us" ]
2023-10-06T06:38:55+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 59010693, "num_examples": 1348}, {"name": "validation", "num_bytes": 4567041, "num_examples": 158}], "download_size": 13833996, "dataset_size": 63577734}}
2023-10-06T06:38:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-question-answering-biencoder-data-75_25" More Information needed
[ "# Dataset Card for \"COVID-QA-question-answering-biencoder-data-75_25\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-question-answering-biencoder-data-75_25\"\n\nMore Information needed" ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-question-answering-biencoder-data-75_25\"\n\nMore Information needed" ]
cc247413fd544cbb2db8c736e8842570e0ed43ae
# Dataset Card for "diffusion.interaction" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayan1988/diffusion.interaction
[ "region:us" ]
2023-10-06T06:45:17+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 8311379.0, "num_examples": 105}], "download_size": 4666120, "dataset_size": 8311379.0}}
2023-10-06T06:45:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "diffusion.interaction" More Information needed
[ "# Dataset Card for \"diffusion.interaction\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"diffusion.interaction\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"diffusion.interaction\"\n\nMore Information needed" ]
42a87cd448313f8118a730fee157144e21bd6419
# Dataset Card for "COVID-QA-sentence-transformer-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-sentence-transformer-biencoder-data-65_25_10
[ "region:us" ]
2023-10-06T06:47:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "positive", "dtype": "string"}, {"name": "negative", "dtype": "string"}, {"name": "document_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 4863851, "num_examples": 2378}, {"name": "test", "num_bytes": 510126, "num_examples": 269}], "download_size": 581674, "dataset_size": 5373977}}
2023-10-06T06:47:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-sentence-transformer-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 32 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
3fe45319c9de769e3b01b89776609e61eef623ea
# Dataset Card for "COVID-QA-testset-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-testset-biencoder-data-65_25_10
[ "region:us" ]
2023-10-06T06:47:56+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 16708455, "num_examples": 201}], "download_size": 442083, "dataset_size": 16708455}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-06T06:47:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-testset-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
8f6eafb4f11be53fa613cc77f25c8ab520fa8847
# Dataset Card for "COVID-QA-question-answering-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-question-answering-biencoder-data-65_25_10
[ "region:us" ]
2023-10-06T06:48:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 55383294, "num_examples": 1170}, {"name": "validation", "num_bytes": 5172033, "num_examples": 140}], "download_size": 16954453, "dataset_size": 60555327}}
2023-10-06T06:48:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-question-answering-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 32 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
4eae19ecb08a8115b27b4a1524c962f956dc67c2
# Dataset Card for "Islamic_forest_image_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/Islamic_forest_image_prompts
[ "region:us" ]
2023-10-06T06:54:15+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3517467, "num_examples": 10000}], "download_size": 151517, "dataset_size": 3517467}}
2023-10-06T06:54:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Islamic_forest_image_prompts" More Information needed
[ "# Dataset Card for \"Islamic_forest_image_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Islamic_forest_image_prompts\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Islamic_forest_image_prompts\"\n\nMore Information needed" ]