sha
stringlengths
40
40
text
stringlengths
1
13.4M
id
stringlengths
2
117
tags
listlengths
1
7.91k
created_at
stringlengths
25
25
metadata
stringlengths
2
875k
last_modified
stringlengths
25
25
arxiv
listlengths
0
25
languages
listlengths
0
7.91k
tags_str
stringlengths
17
159k
text_str
stringlengths
1
447k
text_lists
listlengths
0
352
processed_texts
listlengths
1
353
tokens_length
listlengths
1
353
input_texts
listlengths
1
40
87b84c3e2119cc3c5f6d7c42b515847376051313
# Dataset Card for "neuronx-docs-2-14" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
philschmid/neuronx-docs-2-14
[ "region:us" ]
2023-10-02T07:15:47+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "markdown", "dtype": "string"}, {"name": "html", "dtype": "string"}, {"name": "crawlDate", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 67513723, "num_examples": 913}], "download_size": 14721061, "dataset_size": 67513723}}
2023-10-02T07:15:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "neuronx-docs-2-14" More Information needed
[ "# Dataset Card for \"neuronx-docs-2-14\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"neuronx-docs-2-14\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"neuronx-docs-2-14\"\n\nMore Information needed" ]
7dc67b211859acab7e8ea4c0bcba469893362bcf
# Dataset Card for "important_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShinDC/important_dataset
[ "region:us" ]
2023-10-02T07:19:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 8618263476, "num_examples": 16702061}, {"name": "valid", "num_bytes": 48072624, "num_examples": 93164}], "download_size": 3804670316, "dataset_size": 8666336100}}
2023-11-01T11:41:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "important_dataset" More Information needed
[ "# Dataset Card for \"important_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"important_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"important_dataset\"\n\nMore Information needed" ]
94de45fd6f7cae3cefa3d887d1f5ac85515ea935
# Dataset Card for "retrieval_verification_bm25_squeezebert_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nikchar/retrieval_verification_bm25_squeezebert_v2
[ "region:us" ]
2023-10-02T07:37:19+00:00
{"dataset_info": {"features": [{"name": "claim", "dtype": "string"}, {"name": "evidence_wiki_url", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "retrieved_evidence_title", "sequence": "string"}, {"name": "retrieved_evidence_text", "sequence": "string"}, {"name": "labels", "dtype": "int64"}, {"name": "Retrieval_Success", "dtype": "bool"}, {"name": "Predicted_Labels", "dtype": "int64"}, {"name": "Predicted_Labels_Each_doc", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 66031496, "num_examples": 11073}], "download_size": 30811918, "dataset_size": 66031496}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T07:37:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "retrieval_verification_bm25_squeezebert_v2" More Information needed
[ "# Dataset Card for \"retrieval_verification_bm25_squeezebert_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"retrieval_verification_bm25_squeezebert_v2\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"retrieval_verification_bm25_squeezebert_v2\"\n\nMore Information needed" ]
2a88ef6321c61f6cbcce1af7cae96e5a2154a830
# Dataset Card for Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2](https://huggingface.co/KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 61 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_KnutJaegersberg__RWKV-pileplus-1B5-evol_instruct_v2", "harness_truthfulqa_mc_0", split="train") ``` ## Latest results These are the [latest results from run 2023-10-02T09:01:04.742783](https://huggingface.co/datasets/open-llm-leaderboard/details_KnutJaegersberg__RWKV-pileplus-1B5-evol_instruct_v2/blob/main/results_2023-10-02T09-01-04.742783.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. 
You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.25494922839990974, "acc_stderr": 0.0314010444904469, "acc_norm": 0.25761849126230174, "acc_norm_stderr": 0.03140672184687226, "mc1": 0.21542227662178703, "mc1_stderr": 0.014391902652427678, "mc2": 0.3521451447553084, "mc2_stderr": 0.013530448314563733 }, "harness|arc:challenge|25": { "acc": 0.2935153583617747, "acc_stderr": 0.013307250444941125, "acc_norm": 0.318259385665529, "acc_norm_stderr": 0.013611993916971453 }, "harness|hellaswag|10": { "acc": 0.4223262298346943, "acc_stderr": 0.0049292048643159725, "acc_norm": 0.5550687114120693, "acc_norm_stderr": 0.004959425421382027 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.28, "acc_stderr": 0.04512608598542127, "acc_norm": 0.28, "acc_norm_stderr": 0.04512608598542127 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.3111111111111111, "acc_stderr": 0.039992628766177214, "acc_norm": 0.3111111111111111, "acc_norm_stderr": 0.039992628766177214 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.23684210526315788, "acc_stderr": 0.03459777606810534, "acc_norm": 0.23684210526315788, "acc_norm_stderr": 0.03459777606810534 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.19622641509433963, "acc_stderr": 0.024442388131100824, "acc_norm": 0.19622641509433963, "acc_norm_stderr": 0.024442388131100824 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.24305555555555555, "acc_stderr": 0.03586879280080341, "acc_norm": 0.24305555555555555, "acc_norm_stderr": 0.03586879280080341 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.15, "acc_stderr": 0.03588702812826371, "acc_norm": 0.15, "acc_norm_stderr": 0.03588702812826371 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.2, "acc_stderr": 0.04020151261036845, "acc_norm": 0.2, 
"acc_norm_stderr": 0.04020151261036845 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.24855491329479767, "acc_stderr": 0.03295304696818317, "acc_norm": 0.24855491329479767, "acc_norm_stderr": 0.03295304696818317 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.12745098039215685, "acc_stderr": 0.033182249219420756, "acc_norm": 0.12745098039215685, "acc_norm_stderr": 0.033182249219420756 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.23, "acc_stderr": 0.04229525846816505, "acc_norm": 0.23, "acc_norm_stderr": 0.04229525846816505 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.30638297872340425, "acc_stderr": 0.03013590647851756, "acc_norm": 0.30638297872340425, "acc_norm_stderr": 0.03013590647851756 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.22807017543859648, "acc_stderr": 0.03947152782669415, "acc_norm": 0.22807017543859648, "acc_norm_stderr": 0.03947152782669415 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.2413793103448276, "acc_stderr": 0.03565998174135303, "acc_norm": 0.2413793103448276, "acc_norm_stderr": 0.03565998174135303 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.2671957671957672, "acc_stderr": 0.02278967314577656, "acc_norm": 0.2671957671957672, "acc_norm_stderr": 0.02278967314577656 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.19047619047619047, "acc_stderr": 0.03512207412302052, "acc_norm": 0.19047619047619047, "acc_norm_stderr": 0.03512207412302052 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.2709677419354839, "acc_stderr": 0.025284416114900156, "acc_norm": 0.2709677419354839, "acc_norm_stderr": 0.025284416114900156 }, 
"harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.2512315270935961, "acc_stderr": 0.030516530732694436, "acc_norm": 0.2512315270935961, "acc_norm_stderr": 0.030516530732694436 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.2727272727272727, "acc_stderr": 0.03477691162163659, "acc_norm": 0.2727272727272727, "acc_norm_stderr": 0.03477691162163659 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.15656565656565657, "acc_stderr": 0.025890520358141454, "acc_norm": 0.15656565656565657, "acc_norm_stderr": 0.025890520358141454 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.24870466321243523, "acc_stderr": 0.031195840877700304, "acc_norm": 0.24870466321243523, "acc_norm_stderr": 0.031195840877700304 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.2230769230769231, "acc_stderr": 0.02110773012724399, "acc_norm": 0.2230769230769231, "acc_norm_stderr": 0.02110773012724399 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.28888888888888886, "acc_stderr": 0.027634907264178544, "acc_norm": 0.28888888888888886, "acc_norm_stderr": 0.027634907264178544 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.20588235294117646, "acc_stderr": 0.026265024608275882, "acc_norm": 0.20588235294117646, "acc_norm_stderr": 0.026265024608275882 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.19205298013245034, "acc_stderr": 0.032162984205936156, "acc_norm": 0.19205298013245034, "acc_norm_stderr": 0.032162984205936156 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.22018348623853212, "acc_stderr": 0.01776597865232755, "acc_norm": 0.22018348623853212, "acc_norm_stderr": 0.01776597865232755 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.30092592592592593, "acc_stderr": 
0.03128039084329882, "acc_norm": 0.30092592592592593, "acc_norm_stderr": 0.03128039084329882 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.27941176470588236, "acc_stderr": 0.031493281045079556, "acc_norm": 0.27941176470588236, "acc_norm_stderr": 0.031493281045079556 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.27848101265822783, "acc_stderr": 0.02917868230484253, "acc_norm": 0.27848101265822783, "acc_norm_stderr": 0.02917868230484253 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.2556053811659193, "acc_stderr": 0.029275891003969927, "acc_norm": 0.2556053811659193, "acc_norm_stderr": 0.029275891003969927 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.22900763358778625, "acc_stderr": 0.036853466317118506, "acc_norm": 0.22900763358778625, "acc_norm_stderr": 0.036853466317118506 }, "harness|hendrycksTest-international_law|5": { "acc": 0.38016528925619836, "acc_stderr": 0.04431324501968432, "acc_norm": 0.38016528925619836, "acc_norm_stderr": 0.04431324501968432 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.17592592592592593, "acc_stderr": 0.03680918141673881, "acc_norm": 0.17592592592592593, "acc_norm_stderr": 0.03680918141673881 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.25766871165644173, "acc_stderr": 0.03436150827846917, "acc_norm": 0.25766871165644173, "acc_norm_stderr": 0.03436150827846917 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.30357142857142855, "acc_stderr": 0.04364226155841044, "acc_norm": 0.30357142857142855, "acc_norm_stderr": 0.04364226155841044 }, "harness|hendrycksTest-management|5": { "acc": 0.2524271844660194, "acc_stderr": 0.04301250399690877, "acc_norm": 0.2524271844660194, "acc_norm_stderr": 0.04301250399690877 }, "harness|hendrycksTest-marketing|5": { "acc": 0.28205128205128205, "acc_stderr": 0.029480360549541194, "acc_norm": 0.28205128205128205, "acc_norm_stderr": 0.029480360549541194 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.28, 
"acc_stderr": 0.045126085985421276, "acc_norm": 0.28, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.2515964240102171, "acc_stderr": 0.015517322365529619, "acc_norm": 0.2515964240102171, "acc_norm_stderr": 0.015517322365529619 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.30057803468208094, "acc_stderr": 0.0246853168672578, "acc_norm": 0.30057803468208094, "acc_norm_stderr": 0.0246853168672578 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2223463687150838, "acc_stderr": 0.013907189208156881, "acc_norm": 0.2223463687150838, "acc_norm_stderr": 0.013907189208156881 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.24836601307189543, "acc_stderr": 0.02473998135511359, "acc_norm": 0.24836601307189543, "acc_norm_stderr": 0.02473998135511359 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.2797427652733119, "acc_stderr": 0.025494259350694888, "acc_norm": 0.2797427652733119, "acc_norm_stderr": 0.025494259350694888 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.2623456790123457, "acc_stderr": 0.024477222856135118, "acc_norm": 0.2623456790123457, "acc_norm_stderr": 0.024477222856135118 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.2907801418439716, "acc_stderr": 0.027090664368353178, "acc_norm": 0.2907801418439716, "acc_norm_stderr": 0.027090664368353178 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.27183833116036504, "acc_stderr": 0.011363135278651411, "acc_norm": 0.27183833116036504, "acc_norm_stderr": 0.011363135278651411 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.19852941176470587, "acc_stderr": 0.02423101337054109, "acc_norm": 0.19852941176470587, "acc_norm_stderr": 0.02423101337054109 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.2777777777777778, "acc_stderr": 0.01812022425148458, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.01812022425148458 }, "harness|hendrycksTest-public_relations|5": { "acc": 
0.2636363636363636, "acc_stderr": 0.04220224692971987, "acc_norm": 0.2636363636363636, "acc_norm_stderr": 0.04220224692971987 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.22857142857142856, "acc_stderr": 0.026882144922307744, "acc_norm": 0.22857142857142856, "acc_norm_stderr": 0.026882144922307744 }, "harness|hendrycksTest-sociology|5": { "acc": 0.23383084577114427, "acc_stderr": 0.029929415408348384, "acc_norm": 0.23383084577114427, "acc_norm_stderr": 0.029929415408348384 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.19, "acc_stderr": 0.039427724440366234, "acc_norm": 0.19, "acc_norm_stderr": 0.039427724440366234 }, "harness|hendrycksTest-virology|5": { "acc": 0.3132530120481928, "acc_stderr": 0.03610805018031023, "acc_norm": 0.3132530120481928, "acc_norm_stderr": 0.03610805018031023 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.2807017543859649, "acc_stderr": 0.034462962170884265, "acc_norm": 0.2807017543859649, "acc_norm_stderr": 0.034462962170884265 }, "harness|truthfulqa:mc|0": { "mc1": 0.21542227662178703, "mc1_stderr": 0.014391902652427678, "mc2": 0.3521451447553084, "mc2_stderr": 0.013530448314563733 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_KnutJaegersberg__RWKV-pileplus-1B5-evol_instruct_v2
[ "region:us" ]
2023-10-02T08:01:19+00:00
{"pretty_name": "Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2", "dataset_summary": "Dataset automatically created during the evaluation run of model [KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2](https://huggingface.co/KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_KnutJaegersberg__RWKV-pileplus-1B5-evol_instruct_v2\",\n\t\"harness_truthfulqa_mc_0\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-02T09:01:04.742783](https://huggingface.co/datasets/open-llm-leaderboard/details_KnutJaegersberg__RWKV-pileplus-1B5-evol_instruct_v2/blob/main/results_2023-10-02T09-01-04.742783.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. 
You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.25494922839990974,\n \"acc_stderr\": 0.0314010444904469,\n \"acc_norm\": 0.25761849126230174,\n \"acc_norm_stderr\": 0.03140672184687226,\n \"mc1\": 0.21542227662178703,\n \"mc1_stderr\": 0.014391902652427678,\n \"mc2\": 0.3521451447553084,\n \"mc2_stderr\": 0.013530448314563733\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.2935153583617747,\n \"acc_stderr\": 0.013307250444941125,\n \"acc_norm\": 0.318259385665529,\n \"acc_norm_stderr\": 0.013611993916971453\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.4223262298346943,\n \"acc_stderr\": 0.0049292048643159725,\n \"acc_norm\": 0.5550687114120693,\n \"acc_norm_stderr\": 0.004959425421382027\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.04512608598542127,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.04512608598542127\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.3111111111111111,\n \"acc_stderr\": 0.039992628766177214,\n \"acc_norm\": 0.3111111111111111,\n \"acc_norm_stderr\": 0.039992628766177214\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.23684210526315788,\n \"acc_stderr\": 0.03459777606810534,\n \"acc_norm\": 0.23684210526315788,\n \"acc_norm_stderr\": 0.03459777606810534\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.19622641509433963,\n \"acc_stderr\": 0.024442388131100824,\n \"acc_norm\": 0.19622641509433963,\n \"acc_norm_stderr\": 0.024442388131100824\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.24305555555555555,\n \"acc_stderr\": 0.03586879280080341,\n \"acc_norm\": 0.24305555555555555,\n \"acc_norm_stderr\": 0.03586879280080341\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 
0.15,\n \"acc_stderr\": 0.03588702812826371,\n \"acc_norm\": 0.15,\n \"acc_norm_stderr\": 0.03588702812826371\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.2,\n \"acc_stderr\": 0.04020151261036845,\n \"acc_norm\": 0.2,\n \"acc_norm_stderr\": 0.04020151261036845\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.24855491329479767,\n \"acc_stderr\": 0.03295304696818317,\n \"acc_norm\": 0.24855491329479767,\n \"acc_norm_stderr\": 0.03295304696818317\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.12745098039215685,\n \"acc_stderr\": 0.033182249219420756,\n \"acc_norm\": 0.12745098039215685,\n \"acc_norm_stderr\": 0.033182249219420756\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.23,\n \"acc_stderr\": 0.04229525846816505,\n \"acc_norm\": 0.23,\n \"acc_norm_stderr\": 0.04229525846816505\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.30638297872340425,\n \"acc_stderr\": 0.03013590647851756,\n \"acc_norm\": 0.30638297872340425,\n \"acc_norm_stderr\": 0.03013590647851756\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.22807017543859648,\n \"acc_stderr\": 0.03947152782669415,\n \"acc_norm\": 0.22807017543859648,\n \"acc_norm_stderr\": 0.03947152782669415\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2413793103448276,\n \"acc_stderr\": 0.03565998174135303,\n \"acc_norm\": 0.2413793103448276,\n \"acc_norm_stderr\": 0.03565998174135303\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.2671957671957672,\n \"acc_stderr\": 0.02278967314577656,\n \"acc_norm\": 0.2671957671957672,\n \"acc_norm_stderr\": 0.02278967314577656\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.19047619047619047,\n \"acc_stderr\": 
0.03512207412302052,\n \"acc_norm\": 0.19047619047619047,\n \"acc_norm_stderr\": 0.03512207412302052\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.2709677419354839,\n \"acc_stderr\": 0.025284416114900156,\n \"acc_norm\": 0.2709677419354839,\n \"acc_norm_stderr\": 0.025284416114900156\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.2512315270935961,\n \"acc_stderr\": 0.030516530732694436,\n \"acc_norm\": 0.2512315270935961,\n \"acc_norm_stderr\": 0.030516530732694436\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.2727272727272727,\n \"acc_stderr\": 0.03477691162163659,\n \"acc_norm\": 0.2727272727272727,\n \"acc_norm_stderr\": 0.03477691162163659\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.15656565656565657,\n \"acc_stderr\": 0.025890520358141454,\n \"acc_norm\": 0.15656565656565657,\n \"acc_norm_stderr\": 0.025890520358141454\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.24870466321243523,\n \"acc_stderr\": 0.031195840877700304,\n \"acc_norm\": 0.24870466321243523,\n \"acc_norm_stderr\": 0.031195840877700304\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.2230769230769231,\n \"acc_stderr\": 0.02110773012724399,\n \"acc_norm\": 0.2230769230769231,\n \"acc_norm_stderr\": 0.02110773012724399\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.28888888888888886,\n \"acc_stderr\": 0.027634907264178544,\n \"acc_norm\": 0.28888888888888886,\n \"acc_norm_stderr\": 0.027634907264178544\n },\n 
\"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.20588235294117646,\n \"acc_stderr\": 0.026265024608275882,\n \"acc_norm\": 0.20588235294117646,\n \"acc_norm_stderr\": 0.026265024608275882\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.19205298013245034,\n \"acc_stderr\": 0.032162984205936156,\n \"acc_norm\": 0.19205298013245034,\n \"acc_norm_stderr\": 0.032162984205936156\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.22018348623853212,\n \"acc_stderr\": 0.01776597865232755,\n \"acc_norm\": 0.22018348623853212,\n \"acc_norm_stderr\": 0.01776597865232755\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.30092592592592593,\n \"acc_stderr\": 0.03128039084329882,\n \"acc_norm\": 0.30092592592592593,\n \"acc_norm_stderr\": 0.03128039084329882\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.27941176470588236,\n \"acc_stderr\": 0.031493281045079556,\n \"acc_norm\": 0.27941176470588236,\n \"acc_norm_stderr\": 0.031493281045079556\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.27848101265822783,\n \"acc_stderr\": 0.02917868230484253,\n \"acc_norm\": 0.27848101265822783,\n \"acc_norm_stderr\": 0.02917868230484253\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.2556053811659193,\n \"acc_stderr\": 0.029275891003969927,\n \"acc_norm\": 0.2556053811659193,\n \"acc_norm_stderr\": 0.029275891003969927\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.22900763358778625,\n \"acc_stderr\": 0.036853466317118506,\n \"acc_norm\": 0.22900763358778625,\n \"acc_norm_stderr\": 0.036853466317118506\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.38016528925619836,\n \"acc_stderr\": 0.04431324501968432,\n \"acc_norm\": 0.38016528925619836,\n \"acc_norm_stderr\": 0.04431324501968432\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.17592592592592593,\n \"acc_stderr\": 
0.03680918141673881,\n \"acc_norm\": 0.17592592592592593,\n \"acc_norm_stderr\": 0.03680918141673881\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.25766871165644173,\n \"acc_stderr\": 0.03436150827846917,\n \"acc_norm\": 0.25766871165644173,\n \"acc_norm_stderr\": 0.03436150827846917\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.30357142857142855,\n \"acc_stderr\": 0.04364226155841044,\n \"acc_norm\": 0.30357142857142855,\n \"acc_norm_stderr\": 0.04364226155841044\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.2524271844660194,\n \"acc_stderr\": 0.04301250399690877,\n \"acc_norm\": 0.2524271844660194,\n \"acc_norm_stderr\": 0.04301250399690877\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.28205128205128205,\n \"acc_stderr\": 0.029480360549541194,\n \"acc_norm\": 0.28205128205128205,\n \"acc_norm_stderr\": 0.029480360549541194\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.2515964240102171,\n \"acc_stderr\": 0.015517322365529619,\n \"acc_norm\": 0.2515964240102171,\n \"acc_norm_stderr\": 0.015517322365529619\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.30057803468208094,\n \"acc_stderr\": 0.0246853168672578,\n \"acc_norm\": 0.30057803468208094,\n \"acc_norm_stderr\": 0.0246853168672578\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2223463687150838,\n \"acc_stderr\": 0.013907189208156881,\n \"acc_norm\": 0.2223463687150838,\n \"acc_norm_stderr\": 0.013907189208156881\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.24836601307189543,\n \"acc_stderr\": 0.02473998135511359,\n \"acc_norm\": 0.24836601307189543,\n \"acc_norm_stderr\": 0.02473998135511359\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.2797427652733119,\n \"acc_stderr\": 
0.025494259350694888,\n \"acc_norm\": 0.2797427652733119,\n \"acc_norm_stderr\": 0.025494259350694888\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.2623456790123457,\n \"acc_stderr\": 0.024477222856135118,\n \"acc_norm\": 0.2623456790123457,\n \"acc_norm_stderr\": 0.024477222856135118\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.2907801418439716,\n \"acc_stderr\": 0.027090664368353178,\n \"acc_norm\": 0.2907801418439716,\n \"acc_norm_stderr\": 0.027090664368353178\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.27183833116036504,\n \"acc_stderr\": 0.011363135278651411,\n \"acc_norm\": 0.27183833116036504,\n \"acc_norm_stderr\": 0.011363135278651411\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.19852941176470587,\n \"acc_stderr\": 0.02423101337054109,\n \"acc_norm\": 0.19852941176470587,\n \"acc_norm_stderr\": 0.02423101337054109\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.2777777777777778,\n \"acc_stderr\": 0.01812022425148458,\n \"acc_norm\": 0.2777777777777778,\n \"acc_norm_stderr\": 0.01812022425148458\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.2636363636363636,\n \"acc_stderr\": 0.04220224692971987,\n \"acc_norm\": 0.2636363636363636,\n \"acc_norm_stderr\": 0.04220224692971987\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.22857142857142856,\n \"acc_stderr\": 0.026882144922307744,\n \"acc_norm\": 0.22857142857142856,\n \"acc_norm_stderr\": 0.026882144922307744\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.23383084577114427,\n \"acc_stderr\": 0.029929415408348384,\n \"acc_norm\": 0.23383084577114427,\n \"acc_norm_stderr\": 0.029929415408348384\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.19,\n \"acc_stderr\": 0.039427724440366234,\n \"acc_norm\": 0.19,\n \"acc_norm_stderr\": 0.039427724440366234\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 
0.3132530120481928,\n \"acc_stderr\": 0.03610805018031023,\n \"acc_norm\": 0.3132530120481928,\n \"acc_norm_stderr\": 0.03610805018031023\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.2807017543859649,\n \"acc_stderr\": 0.034462962170884265,\n \"acc_norm\": 0.2807017543859649,\n \"acc_norm_stderr\": 0.034462962170884265\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.21542227662178703,\n \"mc1_stderr\": 0.014391902652427678,\n \"mc2\": 0.3521451447553084,\n \"mc2_stderr\": 0.013530448314563733\n }\n}\n```", "repo_url": "https://huggingface.co/KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|arc:challenge|25_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hellaswag|10_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-college_chemistry|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-high_school_physics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-professional_medicine|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-econometrics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-international_law|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-02T09-01-04.742783.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-02T09-01-04.742783.parquet", 
"**/details_harness|hendrycksTest-world_religions|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_biology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": 
["**/details_harness|hendrycksTest-computer_security|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", 
"data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-marketing|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-philosophy|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": 
["**/details_harness|hendrycksTest-public_relations|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": 
["**/details_harness|truthfulqa:mc|0_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-02T09-01-04.742783.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_02T09_01_04.742783", "path": ["results_2023-10-02T09-01-04.742783.parquet"]}, {"split": "latest", "path": ["results_2023-10-02T09-01-04.742783.parquet"]}]}]}
2023-10-02T08:02:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 on the Open LLM Leaderboard. The dataset is composed of 61 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-02T09:01:04.742783(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-02T09:01:04.742783(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-02T09:01:04.742783(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 32, 31, 180, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model KnutJaegersberg/RWKV-pileplus-1B5-evol_instruct_v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-02T09:01:04.742783(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
ea68d3acc5c652ad244e16df82afaeb3f2ade43a
# Bangumi Image Base of Sora Yori Mo Tooi Basho This is the image base of bangumi Sora yori mo Tooi Basho, we detected 20 characters, 2192 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 445 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 76 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 359 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 94 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 
4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 220 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 62 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 111 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 301 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 101 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 23 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 13 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | 
![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 16 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 72 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 19 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 48 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 56 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 70 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | 
![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 19 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 12 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | noise | 75 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/sorayorimotooibasho
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T08:05:49+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T09:25:41+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Sora Yori Mo Tooi Basho ============================================= This is the image base of bangumi Sora yori mo Tooi Basho, we detected 20 characters, 2192 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
79a2ca7c4e441605d970584a9cc4db292627c68a
# Bangumi Image Base of Yuru Yuri This is the image base of bangumi Yuru Yuri, we detected 31 characters, 5219 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 45 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 429 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 55 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 680 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | 
![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 54 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 26 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 431 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 443 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 334 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 22 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 103 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 331 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 25 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 14 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 21 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 10 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 324 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 70 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 23 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 19 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 9 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 16 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 39 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 918 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | 
![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 449 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 22 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 20 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 15 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 19 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 21 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | 
![preview 8](29/preview_8.png) | | noise | 232 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/yuruyuri
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T08:25:11+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T10:40:58+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Yuru Yuri =============================== This is the image base of bangumi Yuru Yuri, we detected 31 characters, 5219 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
d5860aa900e1c9a169e35cf37bdbc5152f8762e6
# Dataset Card for "a9adf6d9" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/a9adf6d9
[ "region:us" ]
2023-10-02T08:28:02+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 182, "num_examples": 10}], "download_size": 1395, "dataset_size": 182}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T08:28:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "a9adf6d9" More Information needed
[ "# Dataset Card for \"a9adf6d9\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"a9adf6d9\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"a9adf6d9\"\n\nMore Information needed" ]
6e6cdc70126070004839318116aeee3dd5daa398
The french subset of the dataset [Multilingual TEDx](https://www.openslr.org/100). The data uploaded to HF corresponds to the directory fr-fr. The audio files are automatically resampled to 16 kHz. #### Configs: - single_samples (default): all samples taken separately - max=30s: combine consecutive samples for a period shorter than 30 seconds - max=10s: combine consecutive samples for a period shorter than 10 seconds - max: combine all the samples of a TEDx talk #### dependencies (only needed for much faster audio decoding): - ffmpeg: apt install ffmpeg - ffmpeg-python: pip install ffmpeg-python #### Sample ``` {'file': '0u7tTptBo9I-0', 'audio': {'path': None, 'array': array([ 3.05175781e-05, 6.10351562e-05, 9.15527344e-05, ..., -2.44140625e-04, -3.35693359e-04, -2.74658203e-04]), 'sampling_rate': 16000}, 'sentence': "Bonsoir ! Notre planète est recouverte à 70 % d'océan, et pourtant, étrangement, on a choisi de l'appeler « la Terre ». Le poète Heathcote Williams a une vision bien plus objective et moins anthropocentrique, quand il dit que « Vue de l'espace, la planète est bleue. Vue de l'espace, elle est le territoire, non pas des hommes, mais des baleines ». Et pourtant, on vient tous de l'océan. ", 'speaker_id': '0u7tTptBo9I', 'start_timestamp': 17.25, 'end_timestamp': 45.26, 'index': 0} ``` ``` @inproceedings{salesky2021mtedx, title={Multilingual TEDx Corpus for Speech Recognition and Translation}, author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post}, booktitle={Proceedings of Interspeech}, year={2021}, } ```
BrunoHays/multilingual-TEDX-fr
[ "task_categories:automatic-speech-recognition", "size_categories:100K<n<1M", "language:fr", "license:cc-by-nc-nd-4.0", "region:us" ]
2023-10-02T08:39:41+00:00
{"language": ["fr"], "license": "cc-by-nc-nd-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["automatic-speech-recognition"]}
2023-10-23T08:41:59+00:00
[]
[ "fr" ]
TAGS #task_categories-automatic-speech-recognition #size_categories-100K<n<1M #language-French #license-cc-by-nc-nd-4.0 #region-us
The french subset of the dataset Multilingual TEDx. The data uploaded to HF corresponds to the directory fr-fr. The audio files are automatically resampled to 16 kHz. #### Configs: - single_samples (default): all samples taken separately - max=30s: combine consecutive samples for a period shorter than 30 seconds - max=10s: combine consecutive samples for a period shorter than 10 seconds - max: combine all the samples of a TEDx talk #### dependencies (only needed for much faster audio decoding): - ffmpeg: apt install ffmpeg - ffmpeg-python: pip install ffmpeg-python #### Sample
[ "#### Configs:\n- single_samples (default): all samples taken separately\n- max=30s: combine consecutive samples for a period shorter than 30 seconds\n- max=10s: combine consecutive samples for a period shorter than 10 seconds\n- max: combine all the samples of a TEDx talk", "#### dependencies (only needed for much faster audio decoding):\n - ffmpeg: apt install ffmpeg\n - ffmpeg-python: pip install ffmpeg-python", "#### Sample" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #size_categories-100K<n<1M #language-French #license-cc-by-nc-nd-4.0 #region-us \n", "#### Configs:\n- single_samples (default): all samples taken separately\n- max=30s: combine consecutive samples for a period shorter than 30 seconds\n- max=10s: combine consecutive samples for a period shorter than 10 seconds\n- max: combine all the samples of a TEDx talk", "#### dependencies (only needed for much faster audio decoding):\n - ffmpeg: apt install ffmpeg\n - ffmpeg-python: pip install ffmpeg-python", "#### Sample" ]
[ 53, 71, 45, 4 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #size_categories-100K<n<1M #language-French #license-cc-by-nc-nd-4.0 #region-us \n#### Configs:\n- single_samples (default): all samples taken separately\n- max=30s: combine consecutive samples for a period shorter than 30 seconds\n- max=10s: combine consecutive samples for a period shorter than 10 seconds\n- max: combine all the samples of a TEDx talk#### dependencies (only needed for much faster audio decoding):\n - ffmpeg: apt install ffmpeg\n - ffmpeg-python: pip install ffmpeg-python#### Sample" ]
34f3052f95923455dcbdbf71fc3fd396e0d2bed5
# Dataset Card for "giant-midi-sustain-masked" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JasiekKaczmarczyk/giant-midi-sustain-masked
[ "region:us" ]
2023-10-02T08:46:21+00:00
{"dataset_info": {"features": [{"name": "midi_filename", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "pitch", "sequence": "int16", "length": 128}, {"name": "start", "sequence": "float32", "length": 128}, {"name": "dstart", "sequence": "float32", "length": 128}, {"name": "duration", "sequence": "float32", "length": 128}, {"name": "velocity", "sequence": "int16", "length": 128}, {"name": "masking_spaces", "struct": [{"name": "<Random Mask>", "sequence": "bool", "length": 128}, {"name": "<LH Mask>", "sequence": "bool", "length": 128}, {"name": "<RH Mask>", "sequence": "bool", "length": 128}, {"name": "<Harmonic Root Mask>", "sequence": "bool", "length": 128}, {"name": "<Harmonic Outliers Mask>", "sequence": "bool", "length": 128}]}], "splits": [{"name": "train", "num_bytes": 574785389, "num_examples": 238926}, {"name": "validation", "num_bytes": 68225196, "num_examples": 28367}, {"name": "test", "num_bytes": 71425664, "num_examples": 29707}], "download_size": 305011106, "dataset_size": 714436249}}
2023-11-30T09:28:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "giant-midi-sustain-masked" More Information needed
[ "# Dataset Card for \"giant-midi-sustain-masked\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"giant-midi-sustain-masked\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"giant-midi-sustain-masked\"\n\nMore Information needed" ]
474ab5b50b05d95d3b43314b43be07ef0eb463ac
# Bangumi Image Base of Fate/zero This is the image base of bangumi Fate/Zero, we detected 26 characters, 2067 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 145 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 14 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 244 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 109 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | 
![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 285 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 151 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 71 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 40 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 36 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 70 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 27 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 16 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 14 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 23 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 16 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 167 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 72 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 59 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 34 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 9 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 286 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 17 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 25 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 20 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | 
![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 6 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | N/A | N/A | | noise | 111 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/fatezero
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T09:03:59+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T10:59:05+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Fate/zero =============================== This is the image base of bangumi Fate/Zero, we detected 26 characters, 2067 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
f1afccc343ca9a8fd990322a8262a9e46dc54ef6
PS: More data (40k) can be found here [Mxode/StackOverflow-QA-C-Language-40k](https://huggingface.co/datasets/Mxode/StackOverflow-QA-C-Language-40k). --- This is a collection of ~5000 QA's in **C Language** from StackOverflow. The data has been initially cleaned, and each response is with **Accepted Answer**. All data is **<500** in length. The questions and answers were organized into a **one-line** format. A sample format is shown below: ```json { "question": "```\nFILE* file = fopen(some file)\n\npcap_t* pd = pcap_fopen_offline(file)\n\npcap_close(pd)\n\nfclose(file)\n```\n\nThis code occurs double free error.\n\nCould you explain about this happening?\n\nMy Guess is that pd and file pointers are sharing some datas.\n", "answer": "As the documentation says, thepcap_closefunction closes the files associated with thepcap_tstructure passed to it. Closing the file again withfcloseis an error.\n" } ```
Mxode/StackOverflow-QA-C-Language-5k
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "code", "region:us" ]
2023-10-02T09:08:11+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "tags": ["code"]}
2024-01-09T14:19:26+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #code #region-us
PS: More data (40k) can be found here Mxode/StackOverflow-QA-C-Language-40k. --- This is a collection of ~5000 QA's in C Language from StackOverflow. The data has been initially cleaned, and each response is with Accepted Answer. All data is <500 in length. The questions and answers were organized into a one-line format. A sample format is shown below: \nFILE* file = fopen(some file)\n\npcap_t* pd = pcap_fopen_offline(file)\n\npcap_close(pd)\n\nfclose(file)\n
[]
[ "TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #code #region-us \n" ]
[ 44 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #code #region-us \n" ]
a23e5b963e189059bd0dd12d36108afadf3ab173
This is a collection of ~40k QA's in **C Language** from StackOverflow. The data has been initially cleaned, and each response is with **Accepted Answer**. All data is **<1000** in length. The questions and answers were organized into a **one-line** format. A sample format is shown below: ```json { "question": "```\nFILE* file = fopen(some file)\n\npcap_t* pd = pcap_fopen_offline(file)\n\npcap_close(pd)\n\nfclose(file)\n```\n\nThis code occurs double free error.\n\nCould you explain about this happening?\n\nMy Guess is that pd and file pointers are sharing some datas.\n", "answer": "As the documentation says, thepcap_closefunction closes the files associated with thepcap_tstructure passed to it. Closing the file again withfcloseis an error.\n" } ```
Mxode/StackOverflow-QA-C-Language-40k
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "code", "region:us" ]
2023-10-02T09:28:14+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"], "tags": ["code"]}
2023-10-02T09:30:22+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #code #region-us
This is a collection of ~40k QA's in C Language from StackOverflow. The data has been initially cleaned, and each response is with Accepted Answer. All data is <1000 in length. The questions and answers were organized into a one-line format. A sample format is shown below: \nFILE* file = fopen(some file)\n\npcap_t* pd = pcap_fopen_offline(file)\n\npcap_close(pd)\n\nfclose(file)\n
[]
[ "TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #code #region-us \n" ]
[ 44 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #code #region-us \n" ]
51b30b77cb4d08416aacd883f74263876fd2955c
# Dataset Card for "llama-2-nuv-intent-big-multi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Luciya/llama-2-nuv-intent-big-multi
[ "region:us" ]
2023-10-02T09:41:19+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 862786, "num_examples": 1563}], "download_size": 132778, "dataset_size": 862786}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T09:41:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "llama-2-nuv-intent-big-multi" More Information needed
[ "# Dataset Card for \"llama-2-nuv-intent-big-multi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"llama-2-nuv-intent-big-multi\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"llama-2-nuv-intent-big-multi\"\n\nMore Information needed" ]
2714ca61044ab5e9154a63e05ecaef250a472186
# Dataset Card for "maestro-rollingsplit" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SneakyInsect/maestro-rollingsplit
[ "region:us" ]
2023-10-02T10:02:50+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "start", "sequence": "float64"}, {"name": "duration", "sequence": "float64"}, {"name": "pitch", "sequence": "float64"}, {"name": "velocity", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 745208510, "num_examples": 373963}, {"name": "validation", "num_bytes": 84002977, "num_examples": 42153}, {"name": "test", "num_bytes": 97390221, "num_examples": 48820}], "download_size": 144295382, "dataset_size": 926601708}}
2023-10-04T12:21:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "maestro-rollingsplit" More Information needed
[ "# Dataset Card for \"maestro-rollingsplit\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"maestro-rollingsplit\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"maestro-rollingsplit\"\n\nMore Information needed" ]
7c8a172aa546f01f0b67b50eb362f34601bf0f8f
# Dataset Card for "pianofor-ai-sustain-masked" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JasiekKaczmarczyk/pianofor-ai-sustain-masked
[ "region:us" ]
2023-10-02T10:07:50+00:00
{"dataset_info": {"features": [{"name": "midi_filename", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "pitch", "sequence": "int16", "length": 128}, {"name": "start", "sequence": "float32", "length": 128}, {"name": "dstart", "sequence": "float32", "length": 128}, {"name": "duration", "sequence": "float32", "length": 128}, {"name": "velocity", "sequence": "int16", "length": 128}, {"name": "masking_spaces", "struct": [{"name": "<Random Mask>", "sequence": "bool", "length": 128}, {"name": "<LH Mask>", "sequence": "bool", "length": 128}, {"name": "<RH Mask>", "sequence": "bool", "length": 128}, {"name": "<Harmonic Root Mask>", "sequence": "bool", "length": 128}, {"name": "<Harmonic Outliers Mask>", "sequence": "bool", "length": 128}]}], "splits": [{"name": "train", "num_bytes": 454163007, "num_examples": 189001}, {"name": "validation", "num_bytes": 43536465, "num_examples": 18262}, {"name": "test", "num_bytes": 52054314, "num_examples": 21576}], "download_size": 319101693, "dataset_size": 549753786}}
2023-11-30T09:16:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pianofor-ai-sustain-masked" More Information needed
[ "# Dataset Card for \"pianofor-ai-sustain-masked\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pianofor-ai-sustain-masked\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"pianofor-ai-sustain-masked\"\n\nMore Information needed" ]
ad1909f62799d7b1e3c356cbdc65dcc264456141
# Dataset Card for "ip2p-adwm-5000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
FelixdoingAI/ip2p-adwm-5000
[ "region:us" ]
2023-10-02T10:18:04+00:00
{"dataset_info": {"features": [{"name": "original_prompt", "dtype": "string"}, {"name": "original_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "edited_prompt", "dtype": "string"}, {"name": "edited_image", "dtype": "image"}, {"name": "adversarial_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2287964054.0, "num_examples": 5000}], "download_size": 2287986553, "dataset_size": 2287964054.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-01-26T17:37:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ip2p-adwm-5000" More Information needed
[ "# Dataset Card for \"ip2p-adwm-5000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ip2p-adwm-5000\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ip2p-adwm-5000\"\n\nMore Information needed" ]
461ac4a80ff8c72bf998f3460648a7125a1e4b3e
# Dataset Card for "daniel_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jaredthejelly/daniel_dataset
[ "region:us" ]
2023-10-02T10:52:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 148498407, "num_examples": 36636}], "download_size": 70484621, "dataset_size": 148498407}}
2023-10-02T10:52:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "daniel_dataset" More Information needed
[ "# Dataset Card for \"daniel_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"daniel_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"daniel_dataset\"\n\nMore Information needed" ]
26308a5dfa44e1d38bb682a47b592b2d8bbdb197
# Dataset Card for "masked-maestro-v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
roszcz/masked-maestro-v3
[ "region:us" ]
2023-10-02T11:02:32+00:00
{"dataset_info": {"features": [{"name": "pitch", "sequence": "int8", "length": 90}, {"name": "start", "sequence": "float64", "length": 90}, {"name": "dstart", "sequence": "float64", "length": 90}, {"name": "end", "sequence": "float64", "length": 90}, {"name": "duration", "sequence": "float64", "length": 90}, {"name": "velocity", "sequence": "int8", "length": 90}, {"name": "source", "dtype": "string"}, {"name": "masking_space", "struct": [{"name": "<Random Mask>", "sequence": "bool", "length": 90}, {"name": "<LH Mask>", "sequence": "bool", "length": 90}, {"name": "<RH Mask>", "sequence": "bool", "length": 90}, {"name": "<Harmonic Root Mask>", "sequence": "bool", "length": 90}, {"name": "<Harmonic Outliers Mask>", "sequence": "bool", "length": 90}]}], "splits": [{"name": "test", "num_bytes": 472275625, "num_examples": 136870}, {"name": "validation", "num_bytes": 407260307, "num_examples": 118080}, {"name": "train", "num_bytes": 3605902471, "num_examples": 1045755}], "download_size": 4317450762, "dataset_size": 4485438403}}
2023-10-02T14:21:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "masked-maestro-v3" More Information needed
[ "# Dataset Card for \"masked-maestro-v3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"masked-maestro-v3\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"masked-maestro-v3\"\n\nMore Information needed" ]
660b6c4de52d9ae5ef71d1928e3aaa519656bfcf
# Dataset Card for Spotify Million Song Dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://kaggle.com/datasets/notshrirang/spotify-million-song-dataset - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? 
[More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators This dataset was shared by [@notshrirang](https://kaggle.com/notshrirang) ### Licensing Information The license for this dataset is cc0-1.0 ### Citation Information ```bibtex [More Information Needed] ``` ### Contributions [More Information Needed]
vishnupriyavr/spotify-million-song-dataset
[ "license:cc0-1.0", "region:us" ]
2023-10-02T11:16:58+00:00
{"license": ["cc0-1.0"], "converted_from": "kaggle", "kaggle_id": "notshrirang/spotify-million-song-dataset"}
2023-10-02T11:17:02+00:00
[]
[]
TAGS #license-cc0-1.0 #region-us
# Dataset Card for Spotify Million Song Dataset ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary This is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs. ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators This dataset was shared by @notshrirang ### Licensing Information The license for this dataset is cc0-1.0 ### Contributions
[ "# Dataset Card for Spotify Million Song Dataset", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators\n\nThis dataset was shared by @notshrirang", "### Licensing Information\n\nThe license for this dataset is cc0-1.0", "### Contributions" ]
[ "TAGS\n#license-cc0-1.0 #region-us \n", "# Dataset Card for Spotify Million Song Dataset", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators\n\nThis dataset was shared by @notshrirang", "### Licensing Information\n\nThe license for this dataset is cc0-1.0", "### Contributions" ]
[ 14, 10, 125, 25, 51, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 17, 18, 5 ]
[ "passage: TAGS\n#license-cc0-1.0 #region-us \n# Dataset Card for Spotify Million Song Dataset## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:### Dataset Summary\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators\n\nThis dataset was shared by @notshrirang### Licensing Information\n\nThe license for this dataset is cc0-1.0### Contributions" ]
98ec3b5a45c0366e645cda07f55be6d2a66fe2dd
# Dataset Card for "7162bca1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/7162bca1
[ "region:us" ]
2023-10-02T11:26:32+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 227, "num_examples": 10}], "download_size": 1422, "dataset_size": 227}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T11:26:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "7162bca1" More Information needed
[ "# Dataset Card for \"7162bca1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"7162bca1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"7162bca1\"\n\nMore Information needed" ]
eda865f771a2f5e31d8737f830e61de04cfdff67
# Dataset Card for "8c351c30" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/8c351c30
[ "region:us" ]
2023-10-02T11:29:33+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 180, "num_examples": 10}], "download_size": 1362, "dataset_size": 180}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T11:29:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "8c351c30" More Information needed
[ "# Dataset Card for \"8c351c30\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"8c351c30\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"8c351c30\"\n\nMore Information needed" ]
a7cb1bf474f5f12910238214cc1435fe135c852e
# Bangumi Image Base of Flip Flappers This is the image base of bangumi Flip Flappers, we detected 26 characters, 1442 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 423 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 62 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 31 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 37 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) 
| ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 8 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 64 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 41 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 23 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 269 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 8 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 21 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 21 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 56 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 35 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 6 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | N/A | N/A | | 15 | 32 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 15 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 
17 | 9 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 17 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 25 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 18 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 40 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 16 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 6 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | 
![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | N/A | N/A | | 24 | 7 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | N/A | | noise | 152 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/flipflappers
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T11:32:01+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T12:29:12+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Flip Flappers =================================== This is the image base of bangumi Flip Flappers, we detected 26 characters, 1442 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
af73c3fd76d334883de94b04011ba3ad27c6f0c2
# Dataset Card for "38127251" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/38127251
[ "region:us" ]
2023-10-02T11:34:43+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 193, "num_examples": 10}], "download_size": 1396, "dataset_size": 193}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T11:34:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "38127251" More Information needed
[ "# Dataset Card for \"38127251\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"38127251\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"38127251\"\n\nMore Information needed" ]
8eb50e415778eaed762e200f199dc4b82e2c943a
# CIVQA EasyOCR Validation Dataset The CIVQA (Czech Invoice Visual Question Answering) dataset was created with EasyOCR. This dataset contains only the validation split. The train part of the dataset can be found on this URL: https://huggingface.co/datasets/fimu-docproc-research/CIVQA_EasyOCR_Train The encoded validation dataset for the LayoutLM can be found on this link: https://huggingface.co/datasets/fimu-docproc-research/CIVQA_EasyOCR_LayoutLM_Validation All invoices used in this dataset were obtained from public sources. Over these invoices, we were focusing on 15 different entities, which are crucial for processing the invoices. - Invoice number - Variable symbol - Specific symbol - Constant symbol - Bank code - Account number - ICO - Total amount - Invoice date - Due date - Name of supplier - IBAN - DIC - QR code - Supplier's address The invoices included in this dataset were gathered from the internet. We understand that privacy is of utmost importance. Therefore, we sincerely apologise for any inconvenience caused by including your identifiable information in this dataset. If you have identified your data in this dataset and wish to have it removed from research purposes, we request you kindly to access the following URL: https://forms.gle/tUVJKoB22oeTncUD6 We profoundly appreciate your cooperation and understanding in this matter.
fimu-docproc-research/CIVQA_EasyOCR_Validation
[ "language:cs", "license:mit", "finance", "region:us" ]
2023-10-02T11:35:11+00:00
{"language": ["cs"], "license": "mit", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "answers", "dtype": "string"}, {"name": "bboxes", "sequence": {"sequence": "float64"}}, {"name": "answers_bboxes", "sequence": {"sequence": "float64"}}, {"name": "questions", "dtype": "string"}, {"name": "image", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 48446674074, "num_examples": 34159}], "download_size": 10985782991, "dataset_size": 48446674074}, "tags": ["finance"]}
2023-11-21T20:48:07+00:00
[]
[ "cs" ]
TAGS #language-Czech #license-mit #finance #region-us
# CIVQA EasyOCR Validation Dataset The CIVQA (Czech Invoice Visual Question Answering) dataset was created with EasyOCR. This dataset contains only the validation split. The train part of the dataset can be found on this URL: URL The encoded validation dataset for the LayoutLM can be found on this link: URL All invoices used in this dataset were obtained from public sources. Over these invoices, we were focusing on 15 different entities, which are crucial for processing the invoices. - Invoice number - Variable symbol - Specific symbol - Constant symbol - Bank code - Account number - ICO - Total amount - Invoice date - Due date - Name of supplier - IBAN - DIC - QR code - Supplier's address The invoices included in this dataset were gathered from the internet. We understand that privacy is of utmost importance. Therefore, we sincerely apologise for any inconvenience caused by including your identifiable information in this dataset. If you have identified your data in this dataset and wish to have it removed from research purposes, we request you kindly to access the following URL: URL We profoundly appreciate your cooperation and understanding in this matter.
[ "# CIVQA EasyOCR Validation Dataset\n\nThe CIVQA (Czech Invoice Visual Question Answering) dataset was created with EasyOCR. This dataset contains only the validation split. The train part of the dataset can be found on this URL: URL \nThe encoded validation dataset for the LayoutLM can be found on this link: URL\n\nAll invoices used in this dataset were obtained from public sources. Over these invoices, we were focusing on 15 different entities, which are crucial for processing the invoices.\n- Invoice number\n- Variable symbol\n- Specific symbol\n- Constant symbol\n- Bank code\n- Account number\n- ICO\n- Total amount\n- Invoice date\n- Due date\n- Name of supplier\n- IBAN\n- DIC\n- QR code\n- Supplier's address\n\nThe invoices included in this dataset were gathered from the internet. We understand that privacy is of utmost importance. Therefore, we sincerely apologise for any inconvenience caused by including your identifiable information in this dataset. If you have identified your data in this dataset and wish to have it removed from research purposes, we request you kindly to access the following URL: URL\n\nWe profoundly appreciate your cooperation and understanding in this matter." ]
[ "TAGS\n#language-Czech #license-mit #finance #region-us \n", "# CIVQA EasyOCR Validation Dataset\n\nThe CIVQA (Czech Invoice Visual Question Answering) dataset was created with EasyOCR. This dataset contains only the validation split. The train part of the dataset can be found on this URL: URL \nThe encoded validation dataset for the LayoutLM can be found on this link: URL\n\nAll invoices used in this dataset were obtained from public sources. Over these invoices, we were focusing on 15 different entities, which are crucial for processing the invoices.\n- Invoice number\n- Variable symbol\n- Specific symbol\n- Constant symbol\n- Bank code\n- Account number\n- ICO\n- Total amount\n- Invoice date\n- Due date\n- Name of supplier\n- IBAN\n- DIC\n- QR code\n- Supplier's address\n\nThe invoices included in this dataset were gathered from the internet. We understand that privacy is of utmost importance. Therefore, we sincerely apologise for any inconvenience caused by including your identifiable information in this dataset. If you have identified your data in this dataset and wish to have it removed from research purposes, we request you kindly to access the following URL: URL\n\nWe profoundly appreciate your cooperation and understanding in this matter." ]
[ 20, 283 ]
[ "passage: TAGS\n#language-Czech #license-mit #finance #region-us \n# CIVQA EasyOCR Validation Dataset\n\nThe CIVQA (Czech Invoice Visual Question Answering) dataset was created with EasyOCR. This dataset contains only the validation split. The train part of the dataset can be found on this URL: URL \nThe encoded validation dataset for the LayoutLM can be found on this link: URL\n\nAll invoices used in this dataset were obtained from public sources. Over these invoices, we were focusing on 15 different entities, which are crucial for processing the invoices.\n- Invoice number\n- Variable symbol\n- Specific symbol\n- Constant symbol\n- Bank code\n- Account number\n- ICO\n- Total amount\n- Invoice date\n- Due date\n- Name of supplier\n- IBAN\n- DIC\n- QR code\n- Supplier's address\n\nThe invoices included in this dataset were gathered from the internet. We understand that privacy is of utmost importance. Therefore, we sincerely apologise for any inconvenience caused by including your identifiable information in this dataset. If you have identified your data in this dataset and wish to have it removed from research purposes, we request you kindly to access the following URL: URL\n\nWe profoundly appreciate your cooperation and understanding in this matter." ]
578b671e7c54cf7ed178f1dee2ae73c3e063b52d
# Bangumi Image Base of Is It Wrong To Try To Pick Up Girls In A Dungeon? This is the image base of bangumi Is It Wrong to Try to Pick Up Girls in a Dungeon?, we detected 79 characters, 5929 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 128 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 62 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 406 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 34 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 
2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 19 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 31 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 30 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 57 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 18 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 12 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 52 | 
[Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 183 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 21 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 112 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 103 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 55 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 10 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | 
![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 577 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 85 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 41 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 32 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 55 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 16 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 1150 | 
[Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 36 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 22 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 20 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 25 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 17 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 6 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 
5](29/preview_5.png) | ![preview 6](29/preview_6.png) | N/A | N/A | | 30 | 58 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 8 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 12 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 19 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 214 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 116 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 33 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 
2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 16 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 41 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 9 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 140 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 45 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 14 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 
7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 40 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 81 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 43 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 19 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 18 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 19 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 82 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | 
![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 22 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 14 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 6 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | N/A | N/A | | 53 | 17 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 14 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 79 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 133 | 
[Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 13 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 13 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 195 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 97 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 27 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 13 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | 
![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 62 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 8 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 9 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 8 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 8](66/preview_8.png) | | 67 | 33 | [Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 6 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | N/A | N/A | | 69 | 31 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 
2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 9 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) | | 71 | 13 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | ![preview 7](71/preview_7.png) | ![preview 8](71/preview_8.png) | | 72 | 7 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | ![preview 7](72/preview_7.png) | N/A | | 73 | 22 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) | | 74 | 6 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | N/A | N/A | | 75 | 61 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | ![preview 7](75/preview_7.png) | ![preview 8](75/preview_8.png) | | 76 | 13 | 
[Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | ![preview 7](76/preview_7.png) | ![preview 8](76/preview_8.png) | | 77 | 24 | [Download](77/dataset.zip) | ![preview 1](77/preview_1.png) | ![preview 2](77/preview_2.png) | ![preview 3](77/preview_3.png) | ![preview 4](77/preview_4.png) | ![preview 5](77/preview_5.png) | ![preview 6](77/preview_6.png) | ![preview 7](77/preview_7.png) | ![preview 8](77/preview_8.png) | | noise | 532 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/isitwrongtotrytopickupgirlsinadungeon
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T11:38:14+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T14:25:11+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Is It Wrong To Try To Pick Up Girls In A Dungeon? ======================================================================= This is the image base of bangumi Is It Wrong to Try to Pick Up Girls in a Dungeon?, we detected 79 characters, 5929 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
8b93e7e872f749cc5c154d87aa8b26fb8b45d56f
# Bangumi Image Base of Koisuru Asteroid This is the image base of bangumi Koisuru Asteroid, we detected 31 characters, 2450 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 501 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 15 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 22 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 40 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 
4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 222 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 45 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 14 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 94 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 425 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 14 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 18 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | 
![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 26 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 17 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 114 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 27 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 15 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 13 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | 
![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 39 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 245 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 27 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 39 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 187 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 9 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 15 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 
2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 12 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 33 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 12 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 71 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 6 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | N/A | N/A | | 29 | 5 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | N/A | N/A | N/A | | noise | 128 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | 
![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/koisuruasteroid
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-02T11:38:55+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-02T12:43:32+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Koisuru Asteroid ====================================== This is the image base of bangumi Koisuru Asteroid, we detected 31 characters, 2450 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
76a018624004ee4ff70d7a96958bdb6a96e00520
# Dataset Card for "cb0120f1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/cb0120f1
[ "region:us" ]
2023-10-02T11:45:33+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 232, "num_examples": 10}], "download_size": 1452, "dataset_size": 232}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T11:45:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cb0120f1" More Information needed
[ "# Dataset Card for \"cb0120f1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cb0120f1\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cb0120f1\"\n\nMore Information needed" ]
25af4d49b4e2ed83e12a9ab86fd2df91427f6683
# Dataset Card for "7fa2043a" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/7fa2043a
[ "region:us" ]
2023-10-02T12:22:27+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 194, "num_examples": 10}], "download_size": 1397, "dataset_size": 194}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T12:22:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "7fa2043a" More Information needed
[ "# Dataset Card for \"7fa2043a\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"7fa2043a\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"7fa2043a\"\n\nMore Information needed" ]
fb4223d4600527089333f132aa459fa96d2ba4ff
# Dataset Card for "6c022ac8" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/6c022ac8
[ "region:us" ]
2023-10-02T12:25:18+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 219, "num_examples": 10}], "download_size": 1364, "dataset_size": 219}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T12:25:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "6c022ac8" More Information needed
[ "# Dataset Card for \"6c022ac8\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"6c022ac8\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"6c022ac8\"\n\nMore Information needed" ]
3260240af8864097a4757106341c9829092c73d8
# Paraguay Legislation The Paraguay Legislation dataset is a comprehensive collection of legal documents sourced from the legislative framework of Paraguay. The dataset contains legal documents sourced from the legislative framework of Paraguay, including resolutions, decrees, laws, and other kinds of legislative texts. This dataset has been curated as a valuable resource for Natural Language Processing (NLP) tasks. The data is designed for research focused on text classification tasks. The classification process is divided into two objectives: 1. Binary classification: 0 - no cost and 1 - cost (legislation has costs for the society) 2. Multi-classification: classify the document into several hierarchical categories of costs. For more information about multi-classification definitions, please check this link: <todo: link to>. ## Subsets The dataset contains various subsets, each representing different data quality and preparation stages. Within these subsets, you'll encounter multiple versions of the same data, with variations primarily reflecting differences in data quality, metadata columns, and preprocessing tasks applied to change the data. The subsets are the following: **1. Raw:** Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files. **2. Sentences:** Normalized data split by sentence, mainly treating issues of text extracted from PDF. This stage also adds metadata about the sentence, for example: if it is a title or not. **3. Sentence Unlabeled:** Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. 
Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents). **4. Sentence labeled (Ground Truth):** The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation. Each instance of the dataset represents a specific text passage. This dataset has the following data splits: * Training Set: This portion of the data is used to train and fine-tune machine learning models. * Test Set: The test set is reserved for assessing the model's accuracy, generalization, and effectiveness. It remains unseen during training and helps gauge how well the model performs on new, unseen data. Together, these labeled data subsets provide a crucial reference point for building and evaluating models, ensuring they can make informed predictions and classifications with high accuracy and reliability.
fernandoperes/py_legislation
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:es", "license:apache-2.0", "legal", "region:us" ]
2023-10-02T12:43:17+00:00
{"language": ["es"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["legal"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "/raw_text/train.parquet"}]}, {"config_name": "raw_text", "data_files": [{"split": "train", "path": "/raw_text/train.parquet"}]}, {"config_name": "unlabeled_sentences", "data_files": [{"split": "train", "path": "/unlabeled_sentences/train.parquet"}]}], "dataset_info": [{"config_name": "raw_text", "features": [{"name": "source_id", "dtype": "int64"}, {"name": "source_name", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "text_id", "dtype": "int64"}, {"name": "extension", "dtype": {"class_label": {"names": {"0": "docx", "1": "pdf", "2": "html", "3": "txt", "4": "doc"}}}}], "split": "train"}, {"config_name": "unlabeled_sentences", "features": [{"name": "source_id", "dtype": "int64"}, {"name": "source_name", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "text_id", "dtype": "int64"}, {"name": "cost_type", "dtype": {"class_label": {"names": {"0": "no_cost", "1": "adm_cost", "2": "direct_cost", "3": "other_cost"}}}}, {"name": "affected_entity", "dtype": {"class_label": {"names": {"0": "no_affected_ent", "1": "companies", "2": "citizens", "3": "public_adm"}}}}, {"name": "io_categories", "sequence": {"class_label": {"names": {"0": "prestacao_info_empresarial_e_fiscal", "1": "pedidos_de_licencas_e_outros", "2": "registos_e_notificacoes", "3": "candidatura_a_subsidios_e_outros", "4": "disponibilizacao_de_manuais_e_outros", "5": "cooperacao_com_auditorias_e_outros", "6": "prestacao_info_a_consumidores", "7": "outras_ois"}}}}, {"name": "aa_categories", "sequence": {"class_label": {"names": {"0": "aa_1_familiarizacao_com_oi", "1": "aa_1_recolha_e_organizacao_de_info", "2": "aa_1_processamento_de_info", "3": "aa_1_tempos_de_espera", "4": "aa_1_deslocacoes", "5": "aa_1_submissao_de_info", "6": "aa_1_preservacao_de_info", 
"7": "aa_2_familiarizacao_com_oi", "8": "aa_2_recolha_e_organizacao_de_info", "9": "aa_2_processamento_de_info", "10": "aa_2_tempos_de_espera", "11": "aa_2_deslocacoes", "12": "aa_2_submissao_de_info", "13": "aa_2_preservacao_de_info", "14": "aa_3_familiarizacao_com_oi", "15": "aa_3_recolha_e_organizacao_de_info", "16": "aa_3_processamento_de_info", "17": "aa_3_tempos_de_espera", "18": "aa_3_deslocacoes", "19": "aa_3_submissao_de_info", "20": "aa_3_preservacao_de_info", "21": "aa_4_familiarizacao_com_oi", "22": "aa_4_recolha_e_organizacao_de_info", "23": "aa_4_processamento_de_info", "24": "aa_4_tempos_de_espera", "25": "aa_4_deslocacoes", "26": "aa_4_submissao_de_info", "27": "aa_4_preservacao_de_info", "28": "aa_5_familiarizacao_com_oi", "29": "aa_5_recolha_e_organizacao_de_info", "30": "aa_5_processamento_de_info", "31": "aa_5_tempos_de_espera", "32": "aa_5_deslocacoes", "33": "aa_5_submissao_de_info", "34": "aa_5_preservacao_de_info", "35": "aa_6_familiarizacao_com_oi", "36": "aa_6_recolha_e_organizacao_de_info", "37": "aa_6_processamento_de_info", "38": "aa_6_tempos_de_espera", "39": "aa_6_deslocacoes", "40": "aa_6_submissao_de_info", "41": "aa_6_preservacao_de_info", "42": "aa_7_familiarizacao_com_oi", "43": "aa_7_recolha_e_organizacao_de_info", "44": "aa_7_processamento_de_info", "45": "aa_7_tempos_de_espera", "46": "aa_7_deslocacoes", "47": "aa_7_submissao_de_info", "48": "aa_7_preservacao_de_info"}}}}, {"name": "aa_categories_unique", "sequence": {"class_label": {"names": {"0": "familiarizacao_com_oi", "1": "recolha_e_organizacao_de_info", "2": "processamento_de_info", "3": "tempos_de_espera", "4": "deslocacoes", "5": "submissao_de_info", "6": "preservacao_de_info"}}}}], "splits": [{"name": "train"}]}]}
2023-10-04T11:10:16+00:00
[]
[ "es" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-Spanish #license-apache-2.0 #legal #region-us
# Paraguay Legislation The Paraguay Legislation dataset is a comprehensive collection of legal documents sourced from the legislative framework of Paraguay. The dataset contains legal documents sourced from the legislative framework of Paraguay, including resolutions, decrees, laws, and other kinds of legislative texts. This dataset has been curated as a valuable resource for Natural Language Processing (NLP) tasks. The data is designed for research focused on text classification tasks. The classification process is divided into two objectives: 1. Binary classification: 0 - no cost and 1 - cost (legislation has costs for the society) 2. Multi-classification: classify the document into several hierarchical categories of costs. For more information about multi-classification definitions, please check this link: <todo: link to>. ## Subsets The dataset contains various subsets, each representing different data quality and preparation stages. Within these subsets, you'll encounter multiple versions of the same data, with variations primarily reflecting differences in data quality, metadata columns, and preprocessing tasks applied to change the data. The subsets are the following: 1. Raw: Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files. 2. Sentences: Normalized data split by sentence, mainly treating issues of text extracted from PDF. This stage also adds metadata about the sentence, for example: if it is a title or not. 3. Sentence Unlabeled: Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents). 
4. Sentence labeled (Ground Truth): The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation. Each instance of the dataset represents a specific text passage. This dataset has the following data splits: * Training Set: This portion of the data is used to train and fine-tune machine learning models. * Test Set: The test set is reserved for assessing the model's accuracy, generalization, and effectiveness. It remains unseen during training and helps gauge how well the model performs on new, unseen data. Together, these labeled data subsets provide a crucial reference point for building and evaluating models, ensuring they can make informed predictions and classifications with high accuracy and reliability.
[ "# Paraguay Legislation\n\nThe Paraguay Legislation dataset is a comprehensive collection of legal documents sourced from the legislative framework of Paraguay. The dataset contains legal documents sourced from the legislative framework of Paraguay, including resolutions, decrees, laws, and other kinds of legislative texts.\n\nThis dataset has been curated as a valuable resource for Natural Language Processing (NLP) tasks. The data is designed for research focused on text classification tasks. The classification process is divided into two objectives:\n\n 1. Binary classification: 0 - no cost and 1 - cost (legislation has costs for the society)\n\n 2. Multi-classification: classify the document into several hierarchical categories of costs.\n\nFor more information about multi-classification definitions, please check this link: <todo: link to>.", "## Subsets\n\nThe dataset contains various subsets, each representing different data quality and preparation stages. Within these subsets, you'll encounter multiple versions of the same data, with variations primarily reflecting differences in data quality, metadata columns, and preprocessing tasks applied to change the data.\n\nThe subsets are the following:\n\n1. Raw: Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.\n\n2. Sentences: Normalized data split by sentence, mainly treating issues of text extracted from PDF. This stage also adds metadata about the sentence, for example: if it is a title or not.\n\n3. Sentence Unlabeled: Unlabeled corpora of Paraguay legislation. This data is prepared to be labeled by the experts. 
Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents).\n\n4. Sentence labeled (Ground Truth): The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation. Each instance of the dataset represents a specific text passage.\n\nThis dataset has the following data splits:\n\n* Training Set: This portion of the data is used to train and fine-tune machine learning models.\n\n* Test Set: The test set is reserved for assessing the model's accuracy, generalization, and effectiveness. It remains unseen during training and helps gauge how well the model performs on new, unseen data.\n\nTogether, these labeled data subsets provide a crucial reference point for building and evaluating models, ensuring they can make informed predictions and classifications with high accuracy and reliability." ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-Spanish #license-apache-2.0 #legal #region-us \n", "# Paraguay Legislation\n\nThe Paraguay Legislation dataset is a comprehensive collection of legal documents sourced from the legislative framework of Paraguay. The dataset contains legal documents sourced from the legislative framework of Paraguay, including resolutions, decrees, laws, and other kinds of legislative texts.\n\nThis dataset has been curated as a valuable resource for Natural Language Processing (NLP) tasks. The data is designed for research focused on text classification tasks. The classification process is divided into two objectives:\n\n 1. Binary classification: 0 - no cost and 1 - cost (legislation has costs for the society)\n\n 2. Multi-classification: classify the document into several hierarchical categories of costs.\n\nFor more information about multi-classification definitions, please check this link: <todo: link to>.", "## Subsets\n\nThe dataset contains various subsets, each representing different data quality and preparation stages. Within these subsets, you'll encounter multiple versions of the same data, with variations primarily reflecting differences in data quality, metadata columns, and preprocessing tasks applied to change the data.\n\nThe subsets are the following:\n\n1. Raw: Data extracted from the sources files (URls, PDFs and Word files) without any transformation or sentence splitter. It can be helpful because you can access the raw data extracted from the seeds (PDFs and Word files) and apply other preprocessing tasks from this point to prepare the data without returning to extract texts from source files.\n\n2. Sentences: Normalized data split by sentence, mainly treating issues of text extracted from PDF. This stage also adds metadata about the sentence, for example: if it is a title or not.\n\n3. Sentence Unlabeled: Unlabeled corpora of Paraguay legislation. 
This data is prepared to be labeled by the experts. Each instance of the dataset represents a specific text passage, split by its original formatting extracted from raw text (from original documents).\n\n4. Sentence labeled (Ground Truth): The labeled data is the ground truth data used to train the models. This data is annotated by legal experts indicating the existence of administrative costs (and other types) in the legislation. Each instance of the dataset represents a specific text passage.\n\nThis dataset has the following data splits:\n\n* Training Set: This portion of the data is used to train and fine-tune machine learning models.\n\n* Test Set: The test set is reserved for assessing the model's accuracy, generalization, and effectiveness. It remains unseen during training and helps gauge how well the model performs on new, unseen data.\n\nTogether, these labeled data subsets provide a crucial reference point for building and evaluating models, ensuring they can make informed predictions and classifications with high accuracy and reliability." ]
[ 44, 181, 456 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-Spanish #license-apache-2.0 #legal #region-us \n# Paraguay Legislation\n\nThe Paraguay Legislation dataset is a comprehensive collection of legal documents sourced from the legislative framework of Paraguay. The dataset contains legal documents sourced from the legislative framework of Paraguay, including resolutions, decrees, laws, and other kinds of legislative texts.\n\nThis dataset has been curated as a valuable resource for Natural Language Processing (NLP) tasks. The data is designed for research focused on text classification tasks. The classification process is divided into two objectives:\n\n 1. Binary classification: 0 - no cost and 1 - cost (legislation has costs for the society)\n\n 2. Multi-classification: classify the document into several hierarchical categories of costs.\n\nFor more information about multi-classification definitions, please check this link: <todo: link to>." ]
64ad9620a0ad41f4ba915d3d443b074d89ce65a0
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
Sviluppo/test02
[ "region:us" ]
2023-10-02T12:54:41+00:00
{}
2023-10-03T06:46:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 8, 24, 32, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:### Dataset Summary\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
68b45ff6d443a26f08e6da890a8f4e0ca35acc73
# Dataset Card for "14k_data_multichoice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BaorBaor/14k_data_multichoice
[ "region:us" ]
2023-10-02T13:22:11+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": {"sequence": "int32"}}, {"name": "token_type_ids", "sequence": {"sequence": "int8"}}, {"name": "attention_mask", "sequence": {"sequence": "int8"}}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 412680494, "num_examples": 14467}], "download_size": 66160105, "dataset_size": 412680494}}
2023-10-03T01:09:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "14k_data_multichoice" More Information needed
[ "# Dataset Card for \"14k_data_multichoice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"14k_data_multichoice\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"14k_data_multichoice\"\n\nMore Information needed" ]
1570d5672704be02ea6993570512b623ef43b906
Based on Sberquad - Answer converted to human affordable answer. - Context augmented with some pices of texts from wiki accordant to text on tematic and keywords. - This dataset cold be used for training retrieval LLM models or modificators for ability of LLM to retrieve target information from collection of tematic related texts. - Dataset has version with SOURCE data for generating answer with specifing source document for right answer. See file retrieval_dataset_src.jsonl Dataset consists of 45278 examples in russian language of format: { 'text': 'text with correct answer', 'q': 'question text', 'a': 'correct answer text', 'context': 'text of 4-10 text chunks, one with right answer and others relevant with text and question on tematic and keywords' } Length of one example of context + question + answer is less than 7000 symbols. It should be less than 2048 tokens of rugpt tokenizer. File retrieval_dataset_src.jsonl has additionally SOURCE data for every text chunk in context, also SOURCE of right answer is set in answer. This variant of dataset is useful if you need extract answer with specifing source of the right answer. { 'text': 'text with correct answer', 'q': 'question text', 'a': 'correct answer text with SOURCE data of text', 'context': 'text of 4-10 text chunks, one with right answer and others relevant with text and question on tematic and keywords. Each of text chunks has it's own SOURCE data' } All SOURCE data are sintetic generated and not real.
MLNavigator/russian-retrieval
[ "license:mit", "region:us" ]
2023-10-02T13:58:05+00:00
{"license": "mit"}
2023-10-30T13:22:15+00:00
[]
[]
TAGS #license-mit #region-us
Based on Sberquad - Answer converted to human affordable answer. - Context augmented with some pices of texts from wiki accordant to text on tematic and keywords. - This dataset cold be used for training retrieval LLM models or modificators for ability of LLM to retrieve target information from collection of tematic related texts. - Dataset has version with SOURCE data for generating answer with specifing source document for right answer. See file retrieval_dataset_src.jsonl Dataset consists of 45278 examples in russian language of format: { 'text': 'text with correct answer', 'q': 'question text', 'a': 'correct answer text', 'context': 'text of 4-10 text chunks, one with right answer and others relevant with text and question on tematic and keywords' } Length of one example of context + question + answer is less than 7000 symbols. It should be less than 2048 tokens of rugpt tokenizer. File retrieval_dataset_src.jsonl has additionally SOURCE data for every text chunk in context, also SOURCE of right answer is set in answer. This variant of dataset is useful if you need extract answer with specifing source of the right answer. { 'text': 'text with correct answer', 'q': 'question text', 'a': 'correct answer text with SOURCE data of text', 'context': 'text of 4-10 text chunks, one with right answer and others relevant with text and question on tematic and keywords. Each of text chunks has it's own SOURCE data' } All SOURCE data are sintetic generated and not real.
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
3d723c68dd3b6ac3c4c0ae021a2d3ab908c4409a
# Dataset Card for "sales-conversations-instruction" Modification of https://huggingface.co/datasets/goendalf666/sales-conversations-2 The following script was used to transform the sales-conversations-2 dataset to this instruction based dataset: See the main model or github for more information salesGPT_v2: https://huggingface.co/goendalf666/salesGPT_v2 github: https://github.com/tom813/salesGPT_foundation This dataset was created for the purpose of training a sales agent chatbot that can convince people. The initial idea came from: textbooks is all you need https://arxiv.org/abs/2306.11644 gpt-3.5-turbo was used for the generation # Structure The conversations have a customer and a salesman which appear always in changing order. customer, salesman, customer, salesman, etc. The customer always starts the conversation Who ends the conversation is not defined. # Generation Note that a textbook dataset is mandatory for this conversation generation. This examples rely on the following textbook dataset: https://huggingface.co/datasets/goendalf666/sales-textbook_for_convincing_and_selling The data generation code can be found here: https://github.com/tom813/salesGPT_foundation/blob/main/data_generation/conversation2conversation_instruction.py ``` import pandas as pd from datasets import load_dataset, Dataset data = load_dataset("goendalf666/sales-conversations-2", split="train") df = data.to_pandas() df_dict = df.to_dict(orient='list') df = df.fillna('') conversations = [] for i in df.iterrows(): current_conversation = "" try: for j in i[1]: if "Customer:" in j: current_conversation += j + " " elif "Salesman:" in j: prompt = f"""You are a in the role of a Salesman. Here is a conversation: {current_conversation} Answer as a Salesman to the previous Statement to convince the person to buy the product or service. 
{j}""" conversations.append(prompt) current_conversation += j + " " else: break except Exception as e: print(e) print(len(conversations)) df = pd.DataFrame(conversations) ds = Dataset.from_pandas(df) ds.push_to_hub("goendalf666/sales-conversations-instruction") ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
goendalf666/sales-conversations-instruction-base
[ "arxiv:2306.11644", "region:us" ]
2023-10-02T14:03:17+00:00
{"dataset_info": {"features": [{"name": "0", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28036745, "num_examples": 20940}], "download_size": 4782593, "dataset_size": 28036745}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-04T19:44:33+00:00
[ "2306.11644" ]
[]
TAGS #arxiv-2306.11644 #region-us
# Dataset Card for "sales-conversations-instruction" Modification of URL The following script was used to transform the sales-conversations-2 dataset to this instruction based dataset: See the main model or github for more information salesGPT_v2: URL github: URL This dataset was created for the purpose of training a sales agent chatbot that can convince people. The initial idea came from: textbooks is all you need URL gpt-3.5-turbo was used for the generation # Structure The conversations have a customer and a salesman which appear always in changing order. customer, salesman, customer, salesman, etc. The customer always starts the conversation Who ends the conversation is not defined. # Generation Note that a textbook dataset is mandatory for this conversation generation. This examples rely on the following textbook dataset: URL The data generation code can be found here: URL More Information needed
[ "# Dataset Card for \"sales-conversations-instruction\"\n\nModification of URL \nThe following script was used to transform the sales-conversations-2 dataset to this instruction based dataset:\n\nSee the main model or github for more information\n\nsalesGPT_v2: URL\n\ngithub: URL\n\nThis dataset was created for the purpose of training a sales agent chatbot that can convince people.\n\nThe initial idea came from: textbooks is all you need URL\n\ngpt-3.5-turbo was used for the generation", "# Structure\nThe conversations have a customer and a salesman which appear always in changing order. customer, salesman, customer, salesman, etc. \nThe customer always starts the conversation\nWho ends the conversation is not defined.", "# Generation\nNote that a textbook dataset is mandatory for this conversation generation. This examples rely on the following textbook dataset:\nURL\n\nThe data generation code can be found here: URL\n\n\n\nMore Information needed" ]
[ "TAGS\n#arxiv-2306.11644 #region-us \n", "# Dataset Card for \"sales-conversations-instruction\"\n\nModification of URL \nThe following script was used to transform the sales-conversations-2 dataset to this instruction based dataset:\n\nSee the main model or github for more information\n\nsalesGPT_v2: URL\n\ngithub: URL\n\nThis dataset was created for the purpose of training a sales agent chatbot that can convince people.\n\nThe initial idea came from: textbooks is all you need URL\n\ngpt-3.5-turbo was used for the generation", "# Structure\nThe conversations have a customer and a salesman which appear always in changing order. customer, salesman, customer, salesman, etc. \nThe customer always starts the conversation\nWho ends the conversation is not defined.", "# Generation\nNote that a textbook dataset is mandatory for this conversation generation. This examples rely on the following textbook dataset:\nURL\n\nThe data generation code can be found here: URL\n\n\n\nMore Information needed" ]
[ 14, 110, 50, 44 ]
[ "passage: TAGS\n#arxiv-2306.11644 #region-us \n# Dataset Card for \"sales-conversations-instruction\"\n\nModification of URL \nThe following script was used to transform the sales-conversations-2 dataset to this instruction based dataset:\n\nSee the main model or github for more information\n\nsalesGPT_v2: URL\n\ngithub: URL\n\nThis dataset was created for the purpose of training a sales agent chatbot that can convince people.\n\nThe initial idea came from: textbooks is all you need URL\n\ngpt-3.5-turbo was used for the generation# Structure\nThe conversations have a customer and a salesman which appear always in changing order. customer, salesman, customer, salesman, etc. \nThe customer always starts the conversation\nWho ends the conversation is not defined.# Generation\nNote that a textbook dataset is mandatory for this conversation generation. This examples rely on the following textbook dataset:\nURL\n\nThe data generation code can be found here: URL\n\n\n\nMore Information needed" ]
649f2f230334b753c028e51cec2e51f1290cd399
# Dataset Card for HC-Var (Human and ChatGPT Texts with Variety) This is a collection of human texts and ChatGPT (GPT3.5-Turbo) generated texts, to faciliate studies such as generated texts detection. It includes the texts which are generated / human written to accomplish various language tasks with various approaches. The included language tasks and topics are summarized below. Note: For each language task, this dataset considers 3 different prompts to inquire ChatGPT outputs. The example code to train binary classification models is in [this website](https://github.com/hannxu123/hc_var). A technical report on some representative detection methods can be find in [this paper](https://arxiv.org/abs/2310.01307). This dataset is collected by Han Xu from Michigan State University. Potential issues and suggestions are welcomed to be dicussed in the community panel or emails to [email protected]. ## Key variables in the dataset: **text**: The text body (including either human or ChatGPT texts.)\ **domain**: The language tasks included in this dataset: News, Review, (Essay) Writing, QA\ **topic**: The topic in each task.\ **prompt**: The prompt used to obtain ChatGPT outputs. "N/A" for human texts.\ **pp_id**: Each task has 3 prompts to inquire ChatGPT outputs. The "pp_id" denotes the index of prompt. "0" for human texts. "1-3" for ChatGPT texts.\ **label**: "0" for human texts. "1" for ChatGPT texts. ## To cite this dataset ``` @misc{xu2023generalization, title={On the Generalization of Training-based ChatGPT Detection Methods}, author={Han Xu and Jie Ren and Pengfei He and Shenglai Zeng and Yingqian Cui and Amy Liu and Hui Liu and Jiliang Tang}, year={2023}, eprint={2310.01307}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
hannxu/hc_var
[ "task_categories:text-classification", "size_categories:100M<n<1B", "language:en", "license:apache-2.0", "arxiv:2310.01307", "region:us" ]
2023-10-02T14:24:06+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["100M<n<1B"], "task_categories": ["text-classification"]}
2023-10-03T15:33:15+00:00
[ "2310.01307" ]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-100M<n<1B #language-English #license-apache-2.0 #arxiv-2310.01307 #region-us
# Dataset Card for HC-Var (Human and ChatGPT Texts with Variety) This is a collection of human texts and ChatGPT (GPT3.5-Turbo) generated texts, to faciliate studies such as generated texts detection. It includes the texts which are generated / human written to accomplish various language tasks with various approaches. The included language tasks and topics are summarized below. Note: For each language task, this dataset considers 3 different prompts to inquire ChatGPT outputs. The example code to train binary classification models is in this website. A technical report on some representative detection methods can be find in this paper. This dataset is collected by Han Xu from Michigan State University. Potential issues and suggestions are welcomed to be dicussed in the community panel or emails to xuhan1@URL. ## Key variables in the dataset: text: The text body (including either human or ChatGPT texts.)\ domain: The language tasks included in this dataset: News, Review, (Essay) Writing, QA\ topic: The topic in each task.\ prompt: The prompt used to obtain ChatGPT outputs. "N/A" for human texts.\ pp_id: Each task has 3 prompts to inquire ChatGPT outputs. The "pp_id" denotes the index of prompt. "0" for human texts. "1-3" for ChatGPT texts.\ label: "0" for human texts. "1" for ChatGPT texts. ## To cite this dataset
[ "# Dataset Card for HC-Var (Human and ChatGPT Texts with Variety)\n\nThis is a collection of human texts and ChatGPT (GPT3.5-Turbo) generated texts, to faciliate studies such as generated texts detection. \nIt includes the texts which are generated / human written to accomplish various language tasks with various approaches. \nThe included language tasks and topics are summarized below. Note: For each language task, this dataset considers 3 different prompts to inquire ChatGPT outputs.\nThe example code to train binary classification models is in this website. \nA technical report on some representative detection methods can be find in this paper.\nThis dataset is collected by Han Xu from Michigan State\nUniversity. Potential issues and suggestions are welcomed to be dicussed in the community panel or emails to xuhan1@URL.", "## Key variables in the dataset: \ntext: The text body (including either human or ChatGPT texts.)\\\ndomain: The language tasks included in this dataset: News, Review, (Essay) Writing, QA\\\ntopic: The topic in each task.\\\nprompt: The prompt used to obtain ChatGPT outputs. \"N/A\" for human texts.\\\npp_id: Each task has 3 prompts to inquire ChatGPT outputs. The \"pp_id\" denotes the index of prompt. \"0\" for human texts. \"1-3\" for ChatGPT texts.\\\nlabel: \"0\" for human texts. \"1\" for ChatGPT texts.", "## To cite this dataset" ]
[ "TAGS\n#task_categories-text-classification #size_categories-100M<n<1B #language-English #license-apache-2.0 #arxiv-2310.01307 #region-us \n", "# Dataset Card for HC-Var (Human and ChatGPT Texts with Variety)\n\nThis is a collection of human texts and ChatGPT (GPT3.5-Turbo) generated texts, to faciliate studies such as generated texts detection. \nIt includes the texts which are generated / human written to accomplish various language tasks with various approaches. \nThe included language tasks and topics are summarized below. Note: For each language task, this dataset considers 3 different prompts to inquire ChatGPT outputs.\nThe example code to train binary classification models is in this website. \nA technical report on some representative detection methods can be find in this paper.\nThis dataset is collected by Han Xu from Michigan State\nUniversity. Potential issues and suggestions are welcomed to be dicussed in the community panel or emails to xuhan1@URL.", "## Key variables in the dataset: \ntext: The text body (including either human or ChatGPT texts.)\\\ndomain: The language tasks included in this dataset: News, Review, (Essay) Writing, QA\\\ntopic: The topic in each task.\\\nprompt: The prompt used to obtain ChatGPT outputs. \"N/A\" for human texts.\\\npp_id: Each task has 3 prompts to inquire ChatGPT outputs. The \"pp_id\" denotes the index of prompt. \"0\" for human texts. \"1-3\" for ChatGPT texts.\\\nlabel: \"0\" for human texts. \"1\" for ChatGPT texts.", "## To cite this dataset" ]
[ 50, 193, 158, 6 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-100M<n<1B #language-English #license-apache-2.0 #arxiv-2310.01307 #region-us \n# Dataset Card for HC-Var (Human and ChatGPT Texts with Variety)\n\nThis is a collection of human texts and ChatGPT (GPT3.5-Turbo) generated texts, to faciliate studies such as generated texts detection. \nIt includes the texts which are generated / human written to accomplish various language tasks with various approaches. \nThe included language tasks and topics are summarized below. Note: For each language task, this dataset considers 3 different prompts to inquire ChatGPT outputs.\nThe example code to train binary classification models is in this website. \nA technical report on some representative detection methods can be find in this paper.\nThis dataset is collected by Han Xu from Michigan State\nUniversity. Potential issues and suggestions are welcomed to be dicussed in the community panel or emails to xuhan1@URL.## Key variables in the dataset: \ntext: The text body (including either human or ChatGPT texts.)\\\ndomain: The language tasks included in this dataset: News, Review, (Essay) Writing, QA\\\ntopic: The topic in each task.\\\nprompt: The prompt used to obtain ChatGPT outputs. \"N/A\" for human texts.\\\npp_id: Each task has 3 prompts to inquire ChatGPT outputs. The \"pp_id\" denotes the index of prompt. \"0\" for human texts. \"1-3\" for ChatGPT texts.\\\nlabel: \"0\" for human texts. \"1\" for ChatGPT texts.## To cite this dataset" ]
729db94518e58a1909defd6e780618c797e37789
# Bangumi Image Base of Imouto Sae Ireba Ii This is the image base of bangumi Imouto sae Ireba Ii, we detected 18 characters, 622 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 30 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 88 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 7 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | N/A | | 3 | 36 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 
5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 179 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 28 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 29 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 37 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 7 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | N/A | | 9 | 6 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | N/A | N/A | | 10 | 8 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 
5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 10 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 7 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | N/A | | 13 | 10 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 15 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 14 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 69 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | noise | 42 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) 
| ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/imoutosaeirebaii
[ "size_categories:n<1K", "license:mit", "art", "region:us" ]
2023-10-02T14:25:06+00:00
{"license": "mit", "size_categories": ["n<1K"], "tags": ["art"]}
2023-10-02T14:53:08+00:00
[]
[]
TAGS #size_categories-n<1K #license-mit #art #region-us
Bangumi Image Base of Imouto Sae Ireba Ii ========================================= This is the image base of bangumi Imouto sae Ireba Ii, we detected 18 characters, 622 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-n<1K #license-mit #art #region-us \n" ]
[ 23 ]
[ "passage: TAGS\n#size_categories-n<1K #license-mit #art #region-us \n" ]
acd1ad2389426f84fc3cb812c52d3d5dde5c7ce3
# Dataset Card for Nota ## Dataset Description - **Repository:** <https://sprogtek-ressources.digst.govcloud.dk/nota> - **Point of Contact:** [Dan Saattrup Nielsen](mailto:[email protected]) - **Size of downloaded dataset files:** 256.21 GB - **Size of the generated dataset:** 361.62 GB - **Total amount of disk used:** 617.83 GB ### Dataset Summary This data was created by the public institution [Nota](https://nota.dk/), which is part of the Danish Ministry of Culture. Nota has a library audiobooks and audiomagazines for people with reading or sight disabilities. Nota also produces a number of audiobooks and audiomagazines themselves. The dataset consists of audio and associated transcriptions from Nota's audiomagazines "Inspiration" and "Radio/TV". All files related to one reading of one edition of the magazine "Inspiration" or "Radio/TV" has been segmented into bits of 2 - 50 seconds. The dataset has been published as a part of the initiative sprogteknologi.dk, within the [Danish Agency for Digital Government (DIGST)](www.digst.dk). ### Supported Tasks and Leaderboards Automatic speech recognition and speech synthesis are the intended tasks for this dataset. No leaderboard is active at this point. ### Languages The dataset is available in Danish (`da`). ## Dataset Structure ### Data Instances - **Size of downloaded dataset files:** 256.21 GB - **Size of the generated dataset:** 361.62 GB - **Total amount of disk used:** 617.83 GB An example from the dataset looks as follows. ``` { 'audio': {'path': 'RMHL20190028_000140.wav', 'array': array([-0.04023849, -0.06235407, -0.04545404, ..., 0.00014322, 0.00017925, 0.00018811]), 'sampling_rate': 16000}, 'text': '13:05: 24syv Dokumentar 14:05: Spørge Jørgen Vært: Jørgen Leth' } ``` ### Data Fields The data fields are the same among all splits. - `audio`: an `Audio` feature. - `text`: a `string` feature. ### Dataset Statistics There are 98,600 samples in the dataset. 
#### Transcription Length Distribution ![image/png](https://cdn-uploads.huggingface.co/production/uploads/60d368a613f774189902f555/vyyeMoH3XUXsjgRRxCUuB.png) ## Additional Information ### Dataset Curators [Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [The Alexandra Institute](https://alexandra.dk/) reorganised the dataset and uploaded it to the Hugging Face Hub. ### Licensing Information The dataset is licensed under the [CC0 license](https://creativecommons.org/share-your-work/public-domain/cc0/).
alexandrainst/nota
[ "task_categories:automatic-speech-recognition", "task_categories:text-to-speech", "size_categories:10K<n<100K", "language:da", "license:cc0-1.0", "region:us" ]
2023-10-02T15:01:30+00:00
{"language": ["da"], "license": "cc0-1.0", "size_categories": ["10K<n<100K"], "task_categories": ["automatic-speech-recognition", "text-to-speech"], "pretty_name": "Nota", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 361607522962, "num_examples": 98600}], "download_size": 256213612292, "dataset_size": 361607522962}}
2023-10-03T06:51:33+00:00
[]
[ "da" ]
TAGS #task_categories-automatic-speech-recognition #task_categories-text-to-speech #size_categories-10K<n<100K #language-Danish #license-cc0-1.0 #region-us
# Dataset Card for Nota ## Dataset Description - Repository: <URL - Point of Contact: Dan Saattrup Nielsen - Size of downloaded dataset files: 256.21 GB - Size of the generated dataset: 361.62 GB - Total amount of disk used: 617.83 GB ### Dataset Summary This data was created by the public institution Nota, which is part of the Danish Ministry of Culture. Nota has a library audiobooks and audiomagazines for people with reading or sight disabilities. Nota also produces a number of audiobooks and audiomagazines themselves. The dataset consists of audio and associated transcriptions from Nota's audiomagazines "Inspiration" and "Radio/TV". All files related to one reading of one edition of the magazine "Inspiration" or "Radio/TV" has been segmented into bits of 2 - 50 seconds. The dataset has been published as a part of the initiative URL, within the Danish Agency for Digital Government (DIGST). ### Supported Tasks and Leaderboards Automatic speech recognition and speech synthesis are the intended tasks for this dataset. No leaderboard is active at this point. ### Languages The dataset is available in Danish ('da'). ## Dataset Structure ### Data Instances - Size of downloaded dataset files: 256.21 GB - Size of the generated dataset: 361.62 GB - Total amount of disk used: 617.83 GB An example from the dataset looks as follows. ### Data Fields The data fields are the same among all splits. - 'audio': an 'Audio' feature. - 'text': a 'string' feature. ### Dataset Statistics There are 98,600 samples in the dataset. #### Transcription Length Distribution !image/png ## Additional Information ### Dataset Curators Dan Saattrup Nielsen from the The Alexandra Institute reorganised the dataset and uploaded it to the Hugging Face Hub. ### Licensing Information The dataset is licensed under the CC0 license.
[ "# Dataset Card for Nota", "## Dataset Description\n\n- Repository: <URL\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB", "### Dataset Summary\n\nThis data was created by the public institution Nota, which is part of the Danish Ministry of Culture. Nota has a library audiobooks and audiomagazines for people with reading or sight disabilities. Nota also produces a number of audiobooks and audiomagazines themselves. \n\nThe dataset consists of audio and associated transcriptions from Nota's audiomagazines \"Inspiration\" and \"Radio/TV\". All files related to one reading of one edition of the magazine \"Inspiration\" or \"Radio/TV\" has been segmented into bits of 2 - 50 seconds.\n\nThe dataset has been published as a part of the initiative URL, within the Danish Agency for Digital Government (DIGST).", "### Supported Tasks and Leaderboards\n\nAutomatic speech recognition and speech synthesis are the intended tasks for this dataset. No leaderboard is active at this point.", "### Languages\n\nThe dataset is available in Danish ('da').", "## Dataset Structure", "### Data Instances\n\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB\n\nAn example from the dataset looks as follows.", "### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'audio': an 'Audio' feature.\n- 'text': a 'string' feature.", "### Dataset Statistics\n\nThere are 98,600 samples in the dataset.", "#### Transcription Length Distribution\n\n!image/png", "## Additional Information", "### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute reorganised the dataset and uploaded it to the Hugging Face Hub.", "### Licensing Information\n\nThe dataset is licensed under the CC0\nlicense." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #task_categories-text-to-speech #size_categories-10K<n<100K #language-Danish #license-cc0-1.0 #region-us \n", "# Dataset Card for Nota", "## Dataset Description\n\n- Repository: <URL\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB", "### Dataset Summary\n\nThis data was created by the public institution Nota, which is part of the Danish Ministry of Culture. Nota has a library audiobooks and audiomagazines for people with reading or sight disabilities. Nota also produces a number of audiobooks and audiomagazines themselves. \n\nThe dataset consists of audio and associated transcriptions from Nota's audiomagazines \"Inspiration\" and \"Radio/TV\". All files related to one reading of one edition of the magazine \"Inspiration\" or \"Radio/TV\" has been segmented into bits of 2 - 50 seconds.\n\nThe dataset has been published as a part of the initiative URL, within the Danish Agency for Digital Government (DIGST).", "### Supported Tasks and Leaderboards\n\nAutomatic speech recognition and speech synthesis are the intended tasks for this dataset. 
No leaderboard is active at this point.", "### Languages\n\nThe dataset is available in Danish ('da').", "## Dataset Structure", "### Data Instances\n\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB\n\nAn example from the dataset looks as follows.", "### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'audio': an 'Audio' feature.\n- 'text': a 'string' feature.", "### Dataset Statistics\n\nThere are 98,600 samples in the dataset.", "#### Transcription Length Distribution\n\n!image/png", "## Additional Information", "### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute reorganised the dataset and uploaded it to the Hugging Face Hub.", "### Licensing Information\n\nThe dataset is licensed under the CC0\nlicense." ]
[ 60, 6, 58, 152, 38, 16, 6, 54, 41, 18, 12, 5, 33, 18 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #task_categories-text-to-speech #size_categories-10K<n<100K #language-Danish #license-cc0-1.0 #region-us \n# Dataset Card for Nota## Dataset Description\n\n- Repository: <URL\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB### Dataset Summary\n\nThis data was created by the public institution Nota, which is part of the Danish Ministry of Culture. Nota has a library audiobooks and audiomagazines for people with reading or sight disabilities. Nota also produces a number of audiobooks and audiomagazines themselves. \n\nThe dataset consists of audio and associated transcriptions from Nota's audiomagazines \"Inspiration\" and \"Radio/TV\". All files related to one reading of one edition of the magazine \"Inspiration\" or \"Radio/TV\" has been segmented into bits of 2 - 50 seconds.\n\nThe dataset has been published as a part of the initiative URL, within the Danish Agency for Digital Government (DIGST).### Supported Tasks and Leaderboards\n\nAutomatic speech recognition and speech synthesis are the intended tasks for this dataset. No leaderboard is active at this point.### Languages\n\nThe dataset is available in Danish ('da').## Dataset Structure### Data Instances\n\n- Size of downloaded dataset files: 256.21 GB\n- Size of the generated dataset: 361.62 GB\n- Total amount of disk used: 617.83 GB\n\nAn example from the dataset looks as follows.### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'audio': an 'Audio' feature.\n- 'text': a 'string' feature.### Dataset Statistics\n\nThere are 98,600 samples in the dataset.#### Transcription Length Distribution\n\n!image/png## Additional Information### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute reorganised the dataset and uploaded it to the Hugging Face Hub." ]
2b8f72309bc6e9af16a98e8c569e16bd447c9042
A dataset that contains all data in 'ffgcc/NEWS5M' which the corresponding text embedding produced by 'princeton-nlp/unsup-simcse-roberta-large'. The features are transformed to a size of 256 by PCA. The usage: ```python news5M_kd_pca_dataset_unsup = torch.load('./NEWS5M-simcse-roberta-large-embeddings-pca-256/news5M_kd_pca_dataset_unsup.pt') ```
zen-E/NEWS5M-simcse-roberta-large-embeddings-pca-256
[ "task_categories:sentence-similarity", "size_categories:1M<n<10M", "language:en", "region:us" ]
2023-10-02T15:16:54+00:00
{"language": ["en"], "size_categories": ["1M<n<10M"], "task_categories": ["sentence-similarity"]}
2023-10-03T02:03:45+00:00
[]
[ "en" ]
TAGS #task_categories-sentence-similarity #size_categories-1M<n<10M #language-English #region-us
A dataset that contains all data in 'ffgcc/NEWS5M' which the corresponding text embedding produced by 'princeton-nlp/unsup-simcse-roberta-large'. The features are transformed to a size of 256 by PCA. The usage:
[]
[ "TAGS\n#task_categories-sentence-similarity #size_categories-1M<n<10M #language-English #region-us \n" ]
[ 35 ]
[ "passage: TAGS\n#task_categories-sentence-similarity #size_categories-1M<n<10M #language-English #region-us \n" ]
677ab480f2b8b025d907dd9b5fd40e109e0772c2
# Dataset Card for "english_20b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
manu/english-60b
[ "region:us" ]
2023-10-02T15:30:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "dataset_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 259969046699, "num_examples": 58986336}, {"name": "test", "num_bytes": 43278365, "num_examples": 10000}], "download_size": 151705709032, "dataset_size": 260012325064}}
2023-10-16T04:35:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "english_20b" More Information needed
[ "# Dataset Card for \"english_20b\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"english_20b\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"english_20b\"\n\nMore Information needed" ]
2146993a983e0604c4f6f3d25a86d778726bd806
# Dataset Card for "code_20b2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
manu/code-20b
[ "region:us" ]
2023-10-02T15:31:42+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "dataset_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 66209111592, "num_examples": 11692337}, {"name": "test", "num_bytes": 276152957, "num_examples": 48689}], "download_size": 25204013393, "dataset_size": 66485264549}}
2023-10-02T16:00:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "code_20b2" More Information needed
[ "# Dataset Card for \"code_20b2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"code_20b2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"code_20b2\"\n\nMore Information needed" ]
79cd237f3cd009f1119ebedf0e7f0eff5f6581d7
# Habitat Humanoids ![](assets/humanoids_cover.gif) Habitat 3.0 provides support for diverse humanoid avatars, displaying different shapes an motions. Avatars are based on the [SMPL-X](https://smpl-x.is.tue.mpg.de/) body model format, a commonly used data-driven parametric human body model that provides a compact representation of 3D human shape and pose. This repository provides a set of stand-alone avatars and motion files to represent humanoids walking and reaching to objects in the Habitat simulator. However, you can also generate new humanoids using the SMPL-X code base, or use motions coming from motion capture or motion generation models. ## Contents We provide a total of 12 textured avatars of neutral, female and male gender and covering different body shapes. For each avatar, we provide a motion file that allows to drive the avatar to walk in a scene, or reach out to objects, using a [controller](https://github.com/facebookresearch/habitat-lab/blob/main/habitat-lab/habitat/articulated_agent_controllers/humanoid_rearrange_controller.py). The folder structure is as follows: ``` ├── habitat_humanoids │ ├── neutral_0 | | ├── neutral_0.ao_config.json | | ├── neutral_0.glb | | ├── neutral_0_motion_data_smplx.pkl | | ├── neutral_0.urdf │ ├── * │ ├── walk_motion │ | ├── CMU_10_04_stageii.npz ``` Where neutral_0 corresponds to the folder of one of the textured avatars. - `neutral_0.ao_config.json`: contains a dictionary with information on how to link the avatar armature and skinning, and the semantic id of the avatar, when using a semantic sensor. - `neutral_0.glb`: contains the skinning and texture information. - `neutral_0_motion_data_smplx.pkl`: contains relevant motion data files, more information below. - `neutral_0.urdf`: contains the armature, built automatically from the SMPL-X body model. - `walk_motion/CMU_10_04_stageii.npz`: contains a clip file from AMASS, used to build our motion file. 
### Motion Data File For each avatar, we provide a dictionary stored in `*_motion_data_smplx.pkl` which contains information to animate the character to walk around a scene and reach out to different positions.In particular, the dictionary contains 3 keys to store this information. - `walk_motion`: contains a 130 frame clip of a person performing a walking cycle. In particular, the clip corresponds to the frames 300-430 of the file `CMU/10/10_04_stageii.npz` from AMASS dataset. We provide the raw data in this repository, released under a license detailed below. - `stop_pose`: contains a standing position, taken from s single frame from the motion clip above mentioned. - `left_hand`: Contains a grid of poses 48 generated using [VPoser](https://github.com/nghorbani/human_body_prior), where each pose is optimized to reach a given poisition in 3D. In [HumanoidRearrangeController](https://github.com/facebookresearch/habitat-lab/blob/main/habitat-lab/habitat/articulated_agent_controllers/humanoid_rearrange_controller.py), we provide code to interpolate over these poses to reach multiple 3D positions. - `right_hand`: Contains the same grid of poses to reach positions with the *right hand*. ## Usage Clone this file under `data/`. We provide several files in the [habitat-lab repository](https://github.com/facebookresearch/habitat-lab) to instantiate and move the avatars around the scene. ## License The 12 provided avatars, along with their textures, and the reaching positions stored in `left_hand` and `right_hand` are released under a [CC-BY-NC 4.0 License](https://creativecommons.org/licenses/by-nc/4.0/deed.en). The motion data stored in `walk_motion` and `stop_pose`, as well as the original file `CMU_10_04_stageii.npz` is released under the [SMPL Body Motion File License](https://smpl.is.tue.mpg.de/bodylicense.html), a Creative Commons Attribution 4.0 International License. 
For support or inquiries about more SMPL Body Mo.on Files for commercial use, please contact [email protected]. git
ai-habitat/habitat_humanoids
[ "license:cc-by-nc-sa-4.0", "region:us" ]
2023-10-02T15:50:08+00:00
{"license": "cc-by-nc-sa-4.0", "viewer": false}
2023-10-18T16:36:33+00:00
[]
[]
TAGS #license-cc-by-nc-sa-4.0 #region-us
# Habitat Humanoids ![](assets/humanoids_cover.gif) Habitat 3.0 provides support for diverse humanoid avatars, displaying different shapes an motions. Avatars are based on the SMPL-X body model format, a commonly used data-driven parametric human body model that provides a compact representation of 3D human shape and pose. This repository provides a set of stand-alone avatars and motion files to represent humanoids walking and reaching to objects in the Habitat simulator. However, you can also generate new humanoids using the SMPL-X code base, or use motions coming from motion capture or motion generation models. ## Contents We provide a total of 12 textured avatars of neutral, female and male gender and covering different body shapes. For each avatar, we provide a motion file that allows to drive the avatar to walk in a scene, or reach out to objects, using a controller. The folder structure is as follows: Where neutral_0 corresponds to the folder of one of the textured avatars. - 'neutral_0.ao_config.json': contains a dictionary with information on how to link the avatar armature and skinning, and the semantic id of the avatar, when using a semantic sensor. - 'neutral_0.glb': contains the skinning and texture information. - 'neutral_0_motion_data_smplx.pkl': contains relevant motion data files, more information below. - 'neutral_0.urdf': contains the armature, built automatically from the SMPL-X body model. - 'walk_motion/CMU_10_04_stageii.npz': contains a clip file from AMASS, used to build our motion file. ### Motion Data File For each avatar, we provide a dictionary stored in '*_motion_data_smplx.pkl' which contains information to animate the character to walk around a scene and reach out to different positions.In particular, the dictionary contains 3 keys to store this information. - 'walk_motion': contains a 130 frame clip of a person performing a walking cycle. 
In particular, the clip corresponds to the frames 300-430 of the file 'CMU/10/10_04_stageii.npz' from AMASS dataset. We provide the raw data in this repository, released under a license detailed below. - 'stop_pose': contains a standing position, taken from s single frame from the motion clip above mentioned. - 'left_hand': Contains a grid of poses 48 generated using VPoser, where each pose is optimized to reach a given poisition in 3D. In HumanoidRearrangeController, we provide code to interpolate over these poses to reach multiple 3D positions. - 'right_hand': Contains the same grid of poses to reach positions with the *right hand*. ## Usage Clone this file under 'data/'. We provide several files in the habitat-lab repository to instantiate and move the avatars around the scene. ## License The 12 provided avatars, along with their textures, and the reaching positions stored in 'left_hand' and 'right_hand' are released under a CC-BY-NC 4.0 License. The motion data stored in 'walk_motion' and 'stop_pose', as well as the original file 'CMU_10_04_stageii.npz' is released under the SMPL Body Motion File License, a Creative Commons Attribution 4.0 International License. For support or inquiries about more SMPL Body URL Files for commercial use, please contact info@URL. git
[ "# Habitat Humanoids\n\n![](assets/humanoids_cover.gif)\n\n\nHabitat 3.0 provides support for diverse humanoid avatars, displaying different shapes an motions. Avatars are based on the SMPL-X body model format, a commonly used data-driven parametric human body model that provides a compact representation of 3D human shape and pose. \n\nThis repository provides a set of stand-alone avatars and motion files to represent humanoids walking and reaching to objects in the Habitat simulator. However, you can also generate new humanoids using the SMPL-X code base, or use motions coming from motion capture or motion generation models.", "## Contents\n\nWe provide a total of 12 textured avatars of neutral, female and male gender and covering different body shapes. For each avatar, we provide a motion file that allows to drive the avatar to walk in a scene, or reach out to objects, using a controller. \n\nThe folder structure is as follows:\n\n\n\nWhere neutral_0 corresponds to the folder of one of the textured avatars. \n\n- 'neutral_0.ao_config.json': contains a dictionary with information on how to link the avatar armature and skinning, and the semantic id of the avatar, when using a semantic sensor.\n- 'neutral_0.glb': contains the skinning and texture information.\n- 'neutral_0_motion_data_smplx.pkl': contains relevant motion data files, more information below.\n- 'neutral_0.urdf': contains the armature, built automatically from the SMPL-X body model. \n- 'walk_motion/CMU_10_04_stageii.npz': contains a clip file from AMASS, used to build our motion file.", "### Motion Data File\n\nFor each avatar, we provide a dictionary stored in '*_motion_data_smplx.pkl' which contains information to animate the character to walk around a scene and reach out to different positions.In particular, the dictionary contains 3 keys to store this information.\n\n- 'walk_motion': contains a 130 frame clip of a person performing a walking cycle. 
In particular, the clip corresponds to the frames 300-430 of the file 'CMU/10/10_04_stageii.npz' from AMASS dataset. We provide the raw data in this repository, released under a license detailed below.\n- 'stop_pose': contains a standing position, taken from s single frame from the motion clip above mentioned.\n- 'left_hand': Contains a grid of poses 48 generated using VPoser, where each pose is optimized to reach a given poisition in 3D. In HumanoidRearrangeController, we provide code to interpolate over these poses to reach multiple 3D positions. \n- 'right_hand': Contains the same grid of poses to reach positions with the *right hand*.", "## Usage\n\nClone this file under 'data/'. We provide several files in the habitat-lab repository to instantiate and move the avatars around the scene.", "## License\n\nThe 12 provided avatars, along with their textures, and the reaching positions stored in 'left_hand' and 'right_hand' are released under a CC-BY-NC 4.0 License. \n\nThe motion data stored in 'walk_motion' and 'stop_pose', as well as the original file 'CMU_10_04_stageii.npz' is released under the SMPL Body Motion File License, a Creative Commons Attribution 4.0 International License. For support or inquiries about more SMPL Body URL Files for commercial use, please contact info@URL.\ngit" ]
[ "TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n", "# Habitat Humanoids\n\n![](assets/humanoids_cover.gif)\n\n\nHabitat 3.0 provides support for diverse humanoid avatars, displaying different shapes an motions. Avatars are based on the SMPL-X body model format, a commonly used data-driven parametric human body model that provides a compact representation of 3D human shape and pose. \n\nThis repository provides a set of stand-alone avatars and motion files to represent humanoids walking and reaching to objects in the Habitat simulator. However, you can also generate new humanoids using the SMPL-X code base, or use motions coming from motion capture or motion generation models.", "## Contents\n\nWe provide a total of 12 textured avatars of neutral, female and male gender and covering different body shapes. For each avatar, we provide a motion file that allows to drive the avatar to walk in a scene, or reach out to objects, using a controller. \n\nThe folder structure is as follows:\n\n\n\nWhere neutral_0 corresponds to the folder of one of the textured avatars. \n\n- 'neutral_0.ao_config.json': contains a dictionary with information on how to link the avatar armature and skinning, and the semantic id of the avatar, when using a semantic sensor.\n- 'neutral_0.glb': contains the skinning and texture information.\n- 'neutral_0_motion_data_smplx.pkl': contains relevant motion data files, more information below.\n- 'neutral_0.urdf': contains the armature, built automatically from the SMPL-X body model. \n- 'walk_motion/CMU_10_04_stageii.npz': contains a clip file from AMASS, used to build our motion file.", "### Motion Data File\n\nFor each avatar, we provide a dictionary stored in '*_motion_data_smplx.pkl' which contains information to animate the character to walk around a scene and reach out to different positions.In particular, the dictionary contains 3 keys to store this information.\n\n- 'walk_motion': contains a 130 frame clip of a person performing a walking cycle. 
In particular, the clip corresponds to the frames 300-430 of the file 'CMU/10/10_04_stageii.npz' from AMASS dataset. We provide the raw data in this repository, released under a license detailed below.\n- 'stop_pose': contains a standing position, taken from s single frame from the motion clip above mentioned.\n- 'left_hand': Contains a grid of poses 48 generated using VPoser, where each pose is optimized to reach a given poisition in 3D. In HumanoidRearrangeController, we provide code to interpolate over these poses to reach multiple 3D positions. \n- 'right_hand': Contains the same grid of poses to reach positions with the *right hand*.", "## Usage\n\nClone this file under 'data/'. We provide several files in the habitat-lab repository to instantiate and move the avatars around the scene.", "## License\n\nThe 12 provided avatars, along with their textures, and the reaching positions stored in 'left_hand' and 'right_hand' are released under a CC-BY-NC 4.0 License. \n\nThe motion data stored in 'walk_motion' and 'stop_pose', as well as the original file 'CMU_10_04_stageii.npz' is released under the SMPL Body Motion File License, a Creative Commons Attribution 4.0 International License. For support or inquiries about more SMPL Body URL Files for commercial use, please contact info@URL.\ngit" ]
[ 19, 150, 250, 268, 37, 130 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n# Habitat Humanoids\n\n![](assets/humanoids_cover.gif)\n\n\nHabitat 3.0 provides support for diverse humanoid avatars, displaying different shapes an motions. Avatars are based on the SMPL-X body model format, a commonly used data-driven parametric human body model that provides a compact representation of 3D human shape and pose. \n\nThis repository provides a set of stand-alone avatars and motion files to represent humanoids walking and reaching to objects in the Habitat simulator. However, you can also generate new humanoids using the SMPL-X code base, or use motions coming from motion capture or motion generation models.## Contents\n\nWe provide a total of 12 textured avatars of neutral, female and male gender and covering different body shapes. For each avatar, we provide a motion file that allows to drive the avatar to walk in a scene, or reach out to objects, using a controller. \n\nThe folder structure is as follows:\n\n\n\nWhere neutral_0 corresponds to the folder of one of the textured avatars. \n\n- 'neutral_0.ao_config.json': contains a dictionary with information on how to link the avatar armature and skinning, and the semantic id of the avatar, when using a semantic sensor.\n- 'neutral_0.glb': contains the skinning and texture information.\n- 'neutral_0_motion_data_smplx.pkl': contains relevant motion data files, more information below.\n- 'neutral_0.urdf': contains the armature, built automatically from the SMPL-X body model. \n- 'walk_motion/CMU_10_04_stageii.npz': contains a clip file from AMASS, used to build our motion file." ]
895c6d8ec84887475136d589b32c711f4fb17a40
# Dataset Card for "uit_data_sample" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pphuc25/uit_data_sample
[ "region:us" ]
2023-10-02T15:54:27+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "claim", "dtype": "string"}, {"name": "verdict", "dtype": "string"}, {"name": "evidence", "dtype": "string"}, {"name": "domain", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4167523, "num_examples": 1000}], "download_size": 1991987, "dataset_size": 4167523}}
2023-10-02T15:54:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "uit_data_sample" More Information needed
[ "# Dataset Card for \"uit_data_sample\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"uit_data_sample\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"uit_data_sample\"\n\nMore Information needed" ]
3406f4017c48282bc5ebb9a4813f3c58e569b328
# Dataset Card for "fake-library-chats-with-sentiment" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
davanstrien/fake-library-chats-with-sentiment
[ "region:us" ]
2023-10-02T16:06:02+00:00
{"dataset_info": [{"config_name": "default", "features": [{"name": "message", "dtype": "string"}, {"name": "message sentiment", "dtype": {"class_label": {"names": {"0": "positive", "1": "negative", "2": "neutral"}}}}], "splits": [{"name": "train", "num_bytes": 674584, "num_examples": 10000}], "download_size": 0, "dataset_size": 674584}, {"config_name": "demo", "features": [{"name": "message", "dtype": "string"}, {"name": "message sentiment", "dtype": {"class_label": {"names": {"0": "positive", "1": "negative", "2": "neutral"}}}}], "splits": [{"name": "train", "num_bytes": 674584, "num_examples": 10000}], "download_size": 28880, "dataset_size": 674584}], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}, {"config_name": "demo", "data_files": [{"split": "train", "path": "demo/train-*"}]}]}
2023-10-02T19:21:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fake-library-chats-with-sentiment" More Information needed
[ "# Dataset Card for \"fake-library-chats-with-sentiment\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fake-library-chats-with-sentiment\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fake-library-chats-with-sentiment\"\n\nMore Information needed" ]
e06e9903a7554795afaad4507f9c6f5c58f223b5
<i>Im very stupid, and i dont know how to show right images and tags.</i><br> <i>So, i'll just pin .zip file with dataset.</i> Well, dataset contain 30 images and 30 tags accordingly, that dataset i used for make my own LoRA.<br> I posted it in the public for nothing without any reason:D
oblivisheee/vladilenna-mirize-dataset
[ "license:creativeml-openrail-m", "art", "region:us" ]
2023-10-02T16:16:59+00:00
{"license": "creativeml-openrail-m", "tags": ["art"]}
2023-10-02T16:46:05+00:00
[]
[]
TAGS #license-creativeml-openrail-m #art #region-us
<i>Im very stupid, and i dont know how to show right images and tags.</i><br> <i>So, i'll just pin .zip file with dataset.</i> Well, dataset contain 30 images and 30 tags accordingly, that dataset i used for make my own LoRA.<br> I posted it in the public for nothing without any reason:D
[]
[ "TAGS\n#license-creativeml-openrail-m #art #region-us \n" ]
[ 20 ]
[ "passage: TAGS\n#license-creativeml-openrail-m #art #region-us \n" ]
586c0d0ddeb022fccd909c7b415cc2ca8660baa4
# **Retrieval-Augmented Generation (RAG) Dataset 12000** **Retrieval-Augmented Generation (RAG) Dataset 12000 is an English dataset designed for RAG-optimized models, built by [Neural Bridge AI](https://www.neuralbridge.ai/), and released under [Apache license 2.0](https://www.apache.org/licenses/LICENSE-2.0.html).** ## **Dataset Description** #### Dataset Summary Retrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts. RAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable. RAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions. Retrieval-Augmented Generation (RAG) Dataset 12000 dataset is a triple-feature collection, with each entry containing a "context", "question", and "answer" fields, designed to help build RAG-optimized models. This data consists of 12000 entries, and the context data is from [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb). ```python from datasets import load_dataset rag_dataset = load_dataset("neural-bridge/rag-dataset-12000") ``` #### Languages The text in the dataset is in English. The associated BCP-47 code is `en`. ## **Dataset Structure** #### Data Instances A typical data point comprises a context, a question about the context, and an answer for the question. The context is obtained from [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), and the question and answer for each data point are generated by GPT-4. An example from the dataset looks like the following: ``` { context: ... question: ... answer: ... } ``` #### Data Fields - `context`: A string consisting of a range of tokens. - `question`: A string consisting of a question related to the context. - `answer`: A string consisting of an answer for the question. #### Data Splits The data is split into a training and test set. The split sizes are as follow: | | Train | Test | | ----- | ------ | ---- | | RAG Dataset 12000 | 9600 | 2400 | ## Source Data The data points in the dataset are from the [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) dataset. 
## **Neural Bridge AI RAG Datasets Index** | Model | Link | | ----- | ------ | | RAG Full 20000 | [link](https://huggingface.co/datasets/neural-bridge/rag-full-20000) | | RAG Dataset 12000 | [link](https://huggingface.co/datasets/neural-bridge/rag-dataset-12000) | | RAG Dataset 1200 | [link](https://huggingface.co/datasets/neural-bridge/rag-dataset-1200) | | RAG Hallucination Dataset 1000 | [link](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000) | ## **License** This public extract is made available under [Apache license 2.0](https://www.apache.org/licenses/LICENSE-2.0.html). Users should also abide to the [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) ToU.
neural-bridge/rag-dataset-12000
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "retrieval-augmented-generation", "region:us" ]
2023-10-02T16:18:39+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"], "dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_examples": 9600}, {"name": "test", "num_examples": 2400}]}, "tags": ["retrieval-augmented-generation"]}
2024-02-05T18:25:13+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us
Retrieval-Augmented Generation (RAG) Dataset 12000 ================================================== Retrieval-Augmented Generation (RAG) Dataset 12000 is an English dataset designed for RAG-optimized models, built by Neural Bridge AI, and released under Apache license 2.0. Dataset Description ------------------- #### Dataset Summary Retrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts. RAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable. RAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions. Retrieval-Augmented Generation (RAG) Dataset 12000 dataset is a triple-feature collection, with each entry containing a "context", "question", and "answer" fields, designed to help build RAG-optimized models. This data consists of 12000 entries, and the context data is from Falcon RefinedWeb. #### Languages The text in the dataset is in English. The associated BCP-47 code is 'en'. Dataset Structure ----------------- #### Data Instances A typical data point comprises a context, a question about the context, and an answer for the question. The context is obtained from Falcon RefinedWeb, and the question and answer for each data point are generated by GPT-4. An example from the dataset looks like the following: #### Data Fields * 'context': A string consisting of a range of tokens. * 'question': A string consisting of a question related to the context. * 'answer': A string consisting of an answer for the question. #### Data Splits The data is split into a training and test set. The split sizes are as follow: Train: RAG Dataset 12000, Test: 9600 Source Data ----------- The data points in the dataset are from the Falcon RefinedWeb dataset. Neural Bridge AI RAG Datasets Index ----------------------------------- License ------- This public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb ToU.
[ "#### Dataset Summary\n\n\nRetrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts.\n\n\nRAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable.\n\n\nRAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions.\n\n\nRetrieval-Augmented Generation (RAG) Dataset 12000 dataset is a triple-feature collection, with each entry containing a \"context\", \"question\", and \"answer\" fields, designed to help build RAG-optimized models. This data consists of 12000 entries, and the context data is from Falcon RefinedWeb.", "#### Languages\n\n\nThe text in the dataset is in English. The associated BCP-47 code is 'en'.\n\n\nDataset Structure\n-----------------", "#### Data Instances\n\n\nA typical data point comprises a context, a question about the context, and an answer for the question. The context is obtained from Falcon RefinedWeb, and the question and answer for each data point are generated by GPT-4.\n\n\nAn example from the dataset looks like the following:", "#### Data Fields\n\n\n* 'context': A string consisting of a range of tokens.\n* 'question': A string consisting of a question related to the context.\n* 'answer': A string consisting of an answer for the question.", "#### Data Splits\n\n\nThe data is split into a training and test set. The split sizes are as follow:\n\n\nTrain: RAG Dataset 12000, Test: 9600\n\n\nSource Data\n-----------\n\n\nThe data points in the dataset are from the Falcon RefinedWeb dataset.\n\n\nNeural Bridge AI RAG Datasets Index\n-----------------------------------\n\n\n\nLicense\n-------\n\n\nThis public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb ToU." ]
[ "TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us \n", "#### Dataset Summary\n\n\nRetrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts.\n\n\nRAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable.\n\n\nRAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions.\n\n\nRetrieval-Augmented Generation (RAG) Dataset 12000 dataset is a triple-feature collection, with each entry containing a \"context\", \"question\", and \"answer\" fields, designed to help build RAG-optimized models. This data consists of 12000 entries, and the context data is from Falcon RefinedWeb.", "#### Languages\n\n\nThe text in the dataset is in English. The associated BCP-47 code is 'en'.\n\n\nDataset Structure\n-----------------", "#### Data Instances\n\n\nA typical data point comprises a context, a question about the context, and an answer for the question. The context is obtained from Falcon RefinedWeb, and the question and answer for each data point are generated by GPT-4.\n\n\nAn example from the dataset looks like the following:", "#### Data Fields\n\n\n* 'context': A string consisting of a range of tokens.\n* 'question': A string consisting of a question related to the context.\n* 'answer': A string consisting of an answer for the question.", "#### Data Splits\n\n\nThe data is split into a training and test set. The split sizes are as follow:\n\n\nTrain: RAG Dataset 12000, Test: 9600\n\n\nSource Data\n-----------\n\n\nThe data points in the dataset are from the Falcon RefinedWeb dataset.\n\n\nNeural Bridge AI RAG Datasets Index\n-----------------------------------\n\n\n\nLicense\n-------\n\n\nThis public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb ToU." ]
[ 53, 483, 32, 68, 57, 103 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us \n" ]
721348ce46323f78ad84c97a52499c43dca733e6
# Dataset Card for "390d6002" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/390d6002
[ "region:us" ]
2023-10-02T16:22:42+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 177, "num_examples": 10}], "download_size": 1344, "dataset_size": 177}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T16:22:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "390d6002" More Information needed
[ "# Dataset Card for \"390d6002\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"390d6002\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"390d6002\"\n\nMore Information needed" ]
ae45bcbe6684a89d619dda2dd4fa9c24de6d1962
<i>Idk how to publish dataset correct</i> So, i published that dataset for public, because... idk for what, just like that. Dataset contain 49 images and 49 tags, you could download it via zip file.
oblivisheee/ayase-saki-dataset
[ "license:creativeml-openrail-m", "art", "region:us" ]
2023-10-02T16:24:31+00:00
{"license": "creativeml-openrail-m", "tags": ["art"]}
2023-10-02T16:49:40+00:00
[]
[]
TAGS #license-creativeml-openrail-m #art #region-us
<i>Idk how to publish dataset correct</i> So, i published that dataset for public, because... idk for what, just like that. Dataset contain 49 images and 49 tags, you could download it via zip file.
[]
[ "TAGS\n#license-creativeml-openrail-m #art #region-us \n" ]
[ 20 ]
[ "passage: TAGS\n#license-creativeml-openrail-m #art #region-us \n" ]
ea2dfc83f6a3bf6e08dedcf09ce11cf2c285b12a
# Dataset Card for "shikomori-asr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nairaxo/shikomori-asr
[ "region:us" ]
2023-10-02T17:15:40+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "path", "dtype": "string"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 375585328.0, "num_examples": 787}], "download_size": 373013374, "dataset_size": 375585328.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T17:19:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "shikomori-asr" More Information needed
[ "# Dataset Card for \"shikomori-asr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"shikomori-asr\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"shikomori-asr\"\n\nMore Information needed" ]
619b24e75f57b0f9f5321cbb6d2e7f0ee95d91fe
# Dataset Card for "medtrain_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
katielink/medtrain_textbook-quality
[ "region:us" ]
2023-10-02T17:32:47+00:00
{"dataset_info": {"features": [{"name": "raw_flashcards", "dtype": "string"}, {"name": "clean_flashcards", "dtype": "string"}, {"name": "facts", "dtype": "string"}, {"name": "tags", "dtype": "string"}, {"name": "textbook_gpt-3.5-turbo", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 142178, "num_examples": 50}], "download_size": 81531, "dataset_size": 142178}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T18:21:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medtrain_2" More Information needed
[ "# Dataset Card for \"medtrain_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medtrain_2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medtrain_2\"\n\nMore Information needed" ]
898969dedb9adabba822acf55235ae5b249e0f2a
# Dataset Card for "FarsTail-Instruct-LLM" ## Dataset Description FarsTail Instruct LLM dataset is a Persian (Farsi) dataset aimed to be used for text generation tasks by large language models. The dataset is in prompt/completion (instruction) format. The dataset is created from [FarsTail dataset](https://github.com/dml-qom/FarsTail/) by changing the format of "e" (entailment) and "c" (contradiction) cases and adding some command and completion templates. ## Prompt and Completion Templates Here are the templates used for prompt and completion in each datapoint. The templates are assined randomly. ### Templates Used for "e" Cases | <p style="text-align: center;">Prompt Templates</p> | <p style="text-align: center;">Completion Templates</p> | | ---------------- | -------------------- | | <div dir="rtl" style="text-align: justify">از متن زیر چه نتیجه‌ای می‌توان گرفت: </div> | <div dir="rtl" style="text-align: justify"> از متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:</div> | | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: </div> | <div dir="rtl" style="text-align: justify">با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:</div> | ### Templates Used for "c" Cases | <p style="text-align: center;">Prompt Templates</p> | <p style="text-align: center;">Completion Templates</p> | | ---------------- | -------------------- | | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که ناقض متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله زیر متن داده شده را نقض می‌کند: </div> | | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که متن زیر را نقض کند: </div> | <div dir="rtl" style="text-align: justify">جمله زیر با متن داده شده تناقض دارد: </div> | | <div dir="rtl" style="text-align: justify">جمله‌ای متناقض با 
متن زیر بنویس: </div> | <div dir="rtl" style="text-align: justify">جمله زیر ناقض متن داده شده است: </div> | For each datapoint, one prompt template and one completion template are used. As mentioned earlier, the templates are assined randomly. Each unique combination of a prompt template and completion template has got a unique id which is included in the `template_id` column. Here are the mappings between the template combinations and the IDs. ### Template Combination for "e" Cases | <p style="text-align: center;">ID</p> | <p style="text-align: center;">Prompt Templates</p> | <p style="text-align: center;">Completion Templates</p> | | ---------------- | -------------------- | -------------------- | | <p style="text-align: center;">0</p> | <div dir="rtl" style="text-align: justify">از متن زیر چه نتیجه‌ای می‌توان گرفت: </div> | <div dir="rtl" style="text-align: justify"> از متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <p style="text-align: center;">1</p> | <div dir="rtl" style="text-align: justify">از متن زیر چه نتیجه‌ای می‌توان گرفت: </div> | <div dir="rtl" style="text-align: justify">جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:</div> | | <p style="text-align: center;">2</p> | <div dir="rtl" style="text-align: justify">از متن زیر چه نتیجه‌ای می‌توان گرفت: </div> | <div dir="rtl" style="text-align: justify">با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <p style="text-align: center;">3</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify"> از متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <p style="text-align: center;">4</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:</div> | | <p style="text-align: center;">5</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر 
باشد: </div> | <div dir="rtl" style="text-align: justify">با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <p style="text-align: center;">6</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: </div> | <div dir="rtl" style="text-align: justify"> از متن داده شده می‌توان نتیجه زیر را گرفت:</div> | | <p style="text-align: center;">7</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: </div> | <div dir="rtl" style="text-align: justify">جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:</div> | | <p style="text-align: center;">8</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: </div> | <div dir="rtl" style="text-align: justify">با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:</div> | ### Template Combination for "e" Cases | <p style="text-align: center;">ID</p> | <p style="text-align: center;">Prompt Templates</p> | <p style="text-align: center;">Completion Templates</p> | | ---------------- | -------------------- | -------------------- | | <p style="text-align: center;">9</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که ناقض متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله زیر متن داده شده را نقض می‌کند: </div> | | <p style="text-align: center;">10</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که ناقض متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله زیر با متن داده شده تناقض دارد: </div> | | <p style="text-align: center;">11</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که ناقض متن زیر باشد: </div> | <div dir="rtl" style="text-align: justify">جمله زیر ناقض متن داده شده است: </div> | | <p style="text-align: center;">12</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که متن زیر را نقض کند: </div> | <div dir="rtl" style="text-align: justify">جمله زیر متن داده شده را نقض می‌کند: </div> | | <p style="text-align: 
center;">13</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که متن زیر را نقض کند: </div> | <div dir="rtl" style="text-align: justify">جمله زیر با متن داده شده تناقض دارد: </div> | | <p style="text-align: center;">14</p> | <div dir="rtl" style="text-align: justify">جمله‌ای بنویس که متن زیر را نقض کند: </div> | <div dir="rtl" style="text-align: justify">جمله زیر ناقض متن داده شده است: </div> | | <p style="text-align: center;">15</p> | <div dir="rtl" style="text-align: justify">جمله‌ای متناقض با متن زیر بنویس: </div> | <div dir="rtl" style="text-align: justify">جمله زیر متن داده شده را نقض می‌کند: </div> | | <p style="text-align: center;">16</p> | <div dir="rtl" style="text-align: justify">جمله‌ای متناقض با متن زیر بنویس: </div> | <div dir="rtl" style="text-align: justify">جمله زیر با متن داده شده تناقض دارد: </div> | | <p style="text-align: center;">17</p> | <div dir="rtl" style="text-align: justify">جمله‌ای متناقض با متن زیر بنویس: </div> | <div dir="rtl" style="text-align: justify">جمله زیر ناقض متن داده شده است: </div> |
hghader1/FarsTail-Instruct-LLM
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:fa", "license:apache-2.0", "region:us" ]
2023-10-02T17:35:03+00:00
{"language": ["fa"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "FarsTail dataset in prompt completion format", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "template_lang", "dtype": "string"}, {"name": "template_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3062627, "num_examples": 4818}, {"name": "test", "num_bytes": 645712, "num_examples": 1029}, {"name": "valid", "num_bytes": 636115, "num_examples": 1014}], "download_size": 1362720, "dataset_size": 4344454}}
2024-01-29T12:29:25+00:00
[]
[ "fa" ]
TAGS #task_categories-text-generation #size_categories-10K<n<100K #language-Persian #license-apache-2.0 #region-us
Dataset Card for "FarsTail-Instruct-LLM" ======================================== Dataset Description ------------------- FarsTail Instruct LLM dataset is a Persian (Farsi) dataset aimed to be used for text generation tasks by large language models. The dataset is in prompt/completion (instruction) format. The dataset is created from FarsTail dataset by changing the format of "e" (entailment) and "c" (contradiction) cases and adding some command and completion templates. Prompt and Completion Templates ------------------------------- Here are the templates used for prompt and completion in each datapoint. The templates are assigned randomly. ### Templates Used for "e" Cases ### Templates Used for "c" Cases For each datapoint, one prompt template and one completion template are used. As mentioned earlier, the templates are assigned randomly. Each unique combination of a prompt template and completion template has got a unique id which is included in the 'template\_id' column. Here are the mappings between the template combinations and the IDs. 
### Template Combination for "e" Cases ID : 0 , Prompt Templates : از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates : از متن داده شده می‌توان نتیجه زیر را گرفت: ID : 1 , Prompt Templates : از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates : جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت: ID : 2 , Prompt Templates : از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates : با توجه به متن داده شده می‌توان نتیجه زیر را گرفت: ID : 3 , Prompt Templates : جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates : از متن داده شده می‌توان نتیجه زیر را گرفت: ID : 4 , Prompt Templates : جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates : جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت: ID : 5 , Prompt Templates : جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates : با توجه به متن داده شده می‌توان نتیجه زیر را گرفت: ID : 6 , Prompt Templates : جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates : از متن داده شده می‌توان نتیجه زیر را گرفت: ID : 7 , Prompt Templates : جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates : جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت: ID : 8 , Prompt Templates : جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates : با توجه به متن داده شده می‌توان نتیجه زیر را گرفت: ### Template Combination for "e" Cases ID : 9 , Prompt Templates : جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates : جمله زیر متن داده شده را نقض می‌کند: ID : 10 , Prompt Templates : جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates : جمله زیر با متن داده شده تناقض دارد: ID : 11 , Prompt Templates : جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates : جمله زیر ناقض متن داده شده است: ID : 12 , Prompt Templates : جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates : جمله زیر متن داده شده را نقض می‌کند: ID : 13 , Prompt Templates : جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates : جمله زیر با متن داده شده 
تناقض دارد: ID : 14 , Prompt Templates : جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates : جمله زیر ناقض متن داده شده است: ID : 15 , Prompt Templates : جمله‌ای متناقض با متن زیر بنویس: , Completion Templates : جمله زیر متن داده شده را نقض می‌کند: ID : 16 , Prompt Templates : جمله‌ای متناقض با متن زیر بنویس: , Completion Templates : جمله زیر با متن داده شده تناقض دارد: ID : 17 , Prompt Templates : جمله‌ای متناقض با متن زیر بنویس: , Completion Templates : جمله زیر ناقض متن داده شده است:
[ "### Templates Used for \"e\" Cases", "### Templates Used for \"c\" Cases\n\n\n\nFor each datapoint, one prompt template and one completion template are used. As mentioned earlier, the templates are assined randomly. Each unique combination of a prompt template and completion template has got a unique id which is included in the 'template\\_id' column.\nHere are the mappings between the template combinations and the IDs.", "### Template Combination for \"e\" Cases\n\n\nID\n\n: 0\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 1\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 2\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 3\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 4\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 5\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 6\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 7\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 8\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:", "### Template Combination for \"e\" Cases\n\n\nID\n\n: 9\n\n, Prompt Templates\n\n: جمله‌ای بنویس که 
ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 10\n\n, Prompt Templates\n\n: جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 11\n\n, Prompt Templates\n\n: جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است: \nID\n\n: 12\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 13\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 14\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است: \nID\n\n: 15\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 16\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 17\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است:" ]
[ "TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-Persian #license-apache-2.0 #region-us \n", "### Templates Used for \"e\" Cases", "### Templates Used for \"c\" Cases\n\n\n\nFor each datapoint, one prompt template and one completion template are used. As mentioned earlier, the templates are assined randomly. Each unique combination of a prompt template and completion template has got a unique id which is included in the 'template\\_id' column.\nHere are the mappings between the template combinations and the IDs.", "### Template Combination for \"e\" Cases\n\n\nID\n\n: 0\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 1\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 2\n\n, Prompt Templates\n\n: از متن زیر چه نتیجه‌ای می‌توان گرفت: , Completion Templates\n\n: با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 3\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 4\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 5\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌تواند نتیجه‌ متن زیر باشد: , Completion Templates\n\n: با توجه به متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 6\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: از متن داده شده می‌توان نتیجه زیر را گرفت:\nID\n\n: 7\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: جمله‌ زیر را می‌توان از متن داده شده نتیجه گرفت:\nID\n\n: 8\n\n, Prompt Templates\n\n: جمله‌ای بنویس که می‌توان از متن زیر نتیجه گرفت: , Completion Templates\n\n: با توجه به متن داده شده می‌توان 
نتیجه زیر را گرفت:", "### Template Combination for \"e\" Cases\n\n\nID\n\n: 9\n\n, Prompt Templates\n\n: جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 10\n\n, Prompt Templates\n\n: جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 11\n\n, Prompt Templates\n\n: جمله‌ای بنویس که ناقض متن زیر باشد: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است: \nID\n\n: 12\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 13\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 14\n\n, Prompt Templates\n\n: جمله‌ای بنویس که متن زیر را نقض کند: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است: \nID\n\n: 15\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر متن داده شده را نقض می‌کند: \nID\n\n: 16\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر با متن داده شده تناقض دارد: \nID\n\n: 17\n\n, Prompt Templates\n\n: جمله‌ای متناقض با متن زیر بنویس: , Completion Templates\n\n: جمله زیر ناقض متن داده شده است:" ]
[ 42, 12, 90, 396, 366 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-Persian #license-apache-2.0 #region-us \n### Templates Used for \"e\" Cases### Templates Used for \"c\" Cases\n\n\n\nFor each datapoint, one prompt template and one completion template are used. As mentioned earlier, the templates are assined randomly. Each unique combination of a prompt template and completion template has got a unique id which is included in the 'template\\_id' column.\nHere are the mappings between the template combinations and the IDs." ]
6bdeaf311e673575f0f7d4c7f90d7ffbd879729b
# Dataset Card for "french_30b2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
manu/french-30b
[ "region:us" ]
2023-10-02T17:47:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "dataset_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 108345820734, "num_examples": 63517737}, {"name": "test", "num_bytes": 483041948, "num_examples": 93498}], "download_size": 0, "dataset_size": 108828862682}}
2023-10-16T04:21:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "french_30b2" More Information needed
[ "# Dataset Card for \"french_30b2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"french_30b2\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"french_30b2\"\n\nMore Information needed" ]
9844321ff815d98b1c637fe72f32b442e3b611c2
# Dataset Card for Dataset Name This Dataset contains common poor jokes in the form of question answers. ## Dataset Details ### Dataset Description This dataset contains common poor jokes. These jokes were curated by browsing various webpages. The goal behind building the dataset is to enable LLM finetuning for humorous responses. The dataset covers different domains. This dataset contains conversations that may be considered unsafe, offensive, or upsetting. We are not responsible for any outputs of the models trained on this dataset. Statements or opinions made in this dataset do not reflect the views of researchers or institutions involved in the data collection effort. Users of this data are responsible for ensuring its appropriate use, which includes abiding by any applicable laws and regulations. - **Curated by:** Sri Soundararajan - **Funded by [optional]:** Sri Soundararajan - **Shared by [optional]:** Sri Soundararajan - **Language(s) (NLP):** English - **License:** MIT ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** https://huggingface.co/datasets/ssounda1/mokka-chat-ds-v1 - **Paper [optional]:** N/A - **Demo [optional]:** N/A ## Uses The dataset is to be used for building, pre-training and fine-tuning LLMs for a humor enhanced Question Answering use case. ### Direct Use Adding a touch of humor into question answering [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure Simple structure of json blobs and lists - { "train": [ { "source": <String: Include the URL when applicable>, "data": [ { "question": <String>, "answers": [ <String>, <String> ], "context": <String> } ] } ] } ## Dataset Creation ### Curation Rationale To build a dataset for contextual Question Answering and adding humor along the way. 
### Source Data Sources are listed as part of the dataset structure. #### Data Collection and Processing Manually collected and processed. #### Who are the source data producers? Various webpages ## Dataset Card Contact Sri Soundararajan <[email protected]>
ssounda1/mokka-chat-ds-v1
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:mit", "not-for-all-audiences", "region:us" ]
2023-10-02T17:50:27+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "pretty_name": "Poor Jokes Dataset", "tags": ["not-for-all-audiences"]}
2023-12-23T04:03:57+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us
# Dataset Card for Dataset Name This Dataset contains common poor jokes in the form of question answers. ## Dataset Details ### Dataset Description This dataset contains common poor jokes. These jokes were curated by browsing various webpages. The goal behind building the dataset is to enable LLM finetuning for humorous responses. The dataset covers different domains. This dataset contains conversations that may be considered unsafe, offensive, or upsetting. We are not responsible for any outputs of the models trained on this dataset. Statements or opinions made in this dataset do not reflect the views of researchers or institutions involved in the data collection effort. Users of this data are responsible for ensuring its appropriate use, which includes abiding by any applicable laws and regulations. - Curated by: Sri Soundararajan - Funded by [optional]: Sri Soundararajan - Shared by [optional]: Sri Soundararajan - Language(s) (NLP): English - License: MIT ### Dataset Sources [optional] - Repository: URL - Paper [optional]: N/A - Demo [optional]: N/A ## Uses The dataset is to be used for building, pre-training and fine-tuning LLMs for a humor enhanced Question Answering use case. ### Direct Use Adding a touch of humor into question answering ### Out-of-Scope Use ## Dataset Structure Simple structure of json blobs and lists - { "train": [ { "source": <String: Include the URL when applicable>, "data": [ { "question": <String>, "answers": [ <String>, <String> ], "context": <String> } ] } ] } ## Dataset Creation ### Curation Rationale To build a dataset for contextual Question Answering and adding humor along the way. ### Source Data Sources are listed as part of the dataset structure. #### Data Collection and Processing Manually collected and processed. #### Who are the source data producers? Various webpages ## Dataset Card Contact Sri Soundararajan <URL@URL>
[ "# Dataset Card for Dataset Name\n\nThis Dataset contains common poor jokes in the form of question answers.", "## Dataset Details", "### Dataset Description\n\nThis dataset contains common poor jokes. These jokes were curated by browsing various webpages. The goal behind building the dataset is to enable LLM finetuning for humorous responses.\nThe dataset covers different domains.\nThis dataset contains conversations that may be considered unsafe, offensive, or upsetting. We are not responsible for any outputs of the models trained on this dataset.\nStatements or opinions made in this dataset do not reflect the views of researchers or institutions involved in the data collection effort.\nUsers of this data are responsible for ensuring its appropriate use, which includes abiding by any applicable laws and regulations.\n\n\n- Curated by: Sri Soundararajan\n- Funded by [optional]: Sri Soundararajan\n- Shared by [optional]: Sri Soundararajan\n- Language(s) (NLP): English\n- License: MIT", "### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Paper [optional]: N/A\n- Demo [optional]: N/A", "## Uses\n\nThe dataset is to be used for building, pre-training and fine-tuning LLMs for a humor enhanced Question Answering use case.", "### Direct Use\n\nAdding a touch of humor into question answering", "### Out-of-Scope Use", "## Dataset Structure\n\nSimple structure of json blobs and lists -\n{\n \"train\": [\n {\n \"source\": <String: Include the URL when applicable>,\n \"data\": [\n {\n \"question\": <String>,\n \"answers\": [\n <String>,\n <String>\n ],\n \"context\": <String>\n }\n ]\n }\n ]\n}", "## Dataset Creation", "### Curation Rationale\n\nTo build a dataset for contextual Question Answering and adding humor along the way.", "### Source Data\n\nSources are listed as part of the dataset structure.", "#### Data Collection and Processing\n\nManually collected and processed.", "#### Who are the source data producers?\n\nVarious webpages", "## Dataset Card 
Contact\n\nSri Soundararajan <URL@URL>" ]
[ "TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us \n", "# Dataset Card for Dataset Name\n\nThis Dataset contains common poor jokes in the form of question answers.", "## Dataset Details", "### Dataset Description\n\nThis dataset contains common poor jokes. These jokes were curated by browsing various webpages. The goal behind building the dataset is to enable LLM finetuning for humorous responses.\nThe dataset covers different domains.\nThis dataset contains conversations that may be considered unsafe, offensive, or upsetting. We are not responsible for any outputs of the models trained on this dataset.\nStatements or opinions made in this dataset do not reflect the views of researchers or institutions involved in the data collection effort.\nUsers of this data are responsible for ensuring its appropriate use, which includes abiding by any applicable laws and regulations.\n\n\n- Curated by: Sri Soundararajan\n- Funded by [optional]: Sri Soundararajan\n- Shared by [optional]: Sri Soundararajan\n- Language(s) (NLP): English\n- License: MIT", "### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Paper [optional]: N/A\n- Demo [optional]: N/A", "## Uses\n\nThe dataset is to be used for building, pre-training and fine-tuning LLMs for a humor enhanced Question Answering use case.", "### Direct Use\n\nAdding a touch of humor into question answering", "### Out-of-Scope Use", "## Dataset Structure\n\nSimple structure of json blobs and lists -\n{\n \"train\": [\n {\n \"source\": <String: Include the URL when applicable>,\n \"data\": [\n {\n \"question\": <String>,\n \"answers\": [\n <String>,\n <String>\n ],\n \"context\": <String>\n }\n ]\n }\n ]\n}", "## Dataset Creation", "### Curation Rationale\n\nTo build a dataset for contextual Question Answering and adding humor along the way.", "### Source Data\n\nSources are listed as part of the dataset structure.", "#### Data Collection and 
Processing\n\nManually collected and processed.", "#### Who are the source data producers?\n\nVarious webpages", "## Dataset Card Contact\n\nSri Soundararajan <URL@URL>" ]
[ 48, 25, 4, 203, 36, 35, 14, 9, 89, 5, 25, 16, 15, 15, 15 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us \n# Dataset Card for Dataset Name\n\nThis Dataset contains common poor jokes in the form of question answers.## Dataset Details### Dataset Description\n\nThis dataset contains common poor jokes. These jokes were curated by browsing various webpages. The goal behind building the dataset is to enable LLM finetuning for humorous responses.\nThe dataset covers different domains.\nThis dataset contains conversations that may be considered unsafe, offensive, or upsetting. We are not responsible for any outputs of the models trained on this dataset.\nStatements or opinions made in this dataset do not reflect the views of researchers or institutions involved in the data collection effort.\nUsers of this data are responsible for ensuring its appropriate use, which includes abiding by any applicable laws and regulations.\n\n\n- Curated by: Sri Soundararajan\n- Funded by [optional]: Sri Soundararajan\n- Shared by [optional]: Sri Soundararajan\n- Language(s) (NLP): English\n- License: MIT### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Paper [optional]: N/A\n- Demo [optional]: N/A## Uses\n\nThe dataset is to be used for building, pre-training and fine-tuning LLMs for a humor enhanced Question Answering use case.### Direct Use\n\nAdding a touch of humor into question answering### Out-of-Scope Use## Dataset Structure\n\nSimple structure of json blobs and lists -\n{\n \"train\": [\n {\n \"source\": <String: Include the URL when applicable>,\n \"data\": [\n {\n \"question\": <String>,\n \"answers\": [\n <String>,\n <String>\n ],\n \"context\": <String>\n }\n ]\n }\n ]\n}## Dataset Creation### Curation Rationale\n\nTo build a dataset for contextual Question Answering and adding humor along the way." ]
f17777803a88c1da40c5c856100da0c8f00de200
# Dataset Card for "arxiv-math" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayoubkirouane/arxiv-math
[ "region:us" ]
2023-10-02T17:58:56+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35436503.0, "num_examples": 50488}], "download_size": 18875033, "dataset_size": 35436503.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T17:59:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arxiv-math" More Information needed
[ "# Dataset Card for \"arxiv-math\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arxiv-math\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arxiv-math\"\n\nMore Information needed" ]
c52579652ac3dbbd071eb09a9afd23c6850a3655
# Dataset Card for "arxiv-physics" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayoubkirouane/arxiv-physics
[ "region:us" ]
2023-10-02T18:00:24+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21113244.0, "num_examples": 30231}], "download_size": 11217441, "dataset_size": 21113244.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T18:00:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arxiv-physics" More Information needed
[ "# Dataset Card for \"arxiv-physics\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arxiv-physics\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arxiv-physics\"\n\nMore Information needed" ]
6556f5de7fd581a4a39b5e682378dffefb2ba408
# **Retrieval-Augmented Generation (RAG) Full 20000** **Retrieval-Augmented Generation (RAG) Full 20000 is an English dataset designed for RAG-optimized models, built by [Neural Bridge AI](https://www.neuralbridge.ai/), and released under [Apache license 2.0](https://www.apache.org/licenses/LICENSE-2.0.html).** ## **Dataset Description** #### Dataset Summary Retrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts. RAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable. RAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions. Retrieval-Augmented Generation (RAG) Full 20000 dataset is a single-feature dataset, with each entry containing a "clear_prompt" field, designed to help build RAG-optimized models. This data consists of 20000 entries, and the data is from [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), [gsm8k](https://huggingface.co/datasets/gsm8k), and [RAG Hallucination Dataset 1000](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000). ```python from datasets import load_dataset rag_full = load_dataset("neural-bridge/rag-full-20000") ``` #### Languages The text in the dataset is in English. The associated BCP-47 code is `en`. ## **Dataset Structure** #### Data Instances A typical data point comprises the "clear_prompt" field, which is the concatenation of "context" (optional), "question", and "answer" fields. The context is obtained from [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) and [RAG Hallucination Dataset 1000](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000). The question and answer for each data point are either obtained from [gsm8k](https://huggingface.co/datasets/gsm8k) or generated by GPT-4. An example from the dataset looks like the following: ``` { clear_prompt: ... } ``` #### Data Fields - `clear_prompt`: A string consisting of a range of tokens. It includes the "context (optional)", "question", and "answer" fields between "##CONTEXT##", "##QUESTION##", and "##ANSWER##" tags respectively. #### Data Splits The data is split into a training and test set. 
The split sizes are as follows: | | Train | Test | | ----- | ------ | ---- | | RAG Full 20000 | 17433 | 4359 | ## Source Data The data points in the dataset are from the [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), [gsm8k](https://huggingface.co/datasets/gsm8k), and [RAG Hallucination Dataset 1000](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000) datasets. ## **Neural Bridge AI RAG Datasets Index** | Model | Link | | ----- | ------ | | RAG Full 20000 | [link](https://huggingface.co/datasets/neural-bridge/rag-full-20000) | | RAG Dataset 12000 | [link](https://huggingface.co/datasets/neural-bridge/rag-dataset-12000) | | RAG Dataset 1200 | [link](https://huggingface.co/datasets/neural-bridge/rag-dataset-1200) | | RAG Hallucination Dataset 1000 | [link](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000) | ## **License** This public extract is made available under [Apache license 2.0](https://www.apache.org/licenses/LICENSE-2.0.html). Users should also abide by the [Falcon RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), [gsm8k](https://huggingface.co/datasets/gsm8k), and [RAG Hallucination Dataset 1000](https://huggingface.co/datasets/neural-bridge/rag-hallucination-dataset-1000) ToUs.
neural-bridge/rag-full-20000
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "retrieval-augmented-generation", "region:us" ]
2023-10-02T19:13:17+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"], "dataset_info": {"features": [{"name": "clear_prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43183498.53262665, "num_examples": 17433}, {"name": "test", "num_bytes": 10797732.467373349, "num_examples": 4359}], "download_size": 32335855, "dataset_size": 53981231}, "tags": ["retrieval-augmented-generation"]}
2024-02-05T18:24:39+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us
Retrieval-Augmented Generation (RAG) Full 20000 =============================================== Retrieval-Augmented Generation (RAG) Full 20000 is an English dataset designed for RAG-optimized models, built by Neural Bridge AI, and released under Apache license 2.0. Dataset Description ------------------- #### Dataset Summary Retrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts. RAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable. RAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions. Retrieval-Augmented Generation (RAG) Full 20000 dataset is a sigle-feature dataset, with each entry containing a "clear\_prompt" field, designed to help build RAG-optimized models. This data consists of 20000 entries, and the data is from Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000. #### Languages The text in the dataset is in English. The associated BCP-47 code is 'en'. Dataset Structure ----------------- #### Data Instances A typical data point comprises the "clear\_prompt" field, which is the concatenation of "context" (optional), "question", and "answer" fields. The context is obtained from Falcon RefinedWeb and RAG Hallucination Dataset 1000. The question and answer for each data point are neither obtained by gsm8k nor generated by GPT-4. An example from the dataset looks like the following: #### Data Fields * 'clear\_prompt': A string consisting of a range of tokens. It includes the "context (optional)", "question", and "answer" fields between "##CONTEXT##", "##QUESTION##", and "##ANSWER##" tags respectively. #### Data Splits The data is split into a training and test set. The split sizes are as follow: Train: RAG Full 20000, Test: 17433 Source Data ----------- The data points in the dataset are from the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 datasets. Neural Bridge AI RAG Datasets Index ----------------------------------- License ------- This public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 ToUs.
[ "#### Dataset Summary\n\n\nRetrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts.\n\n\nRAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable.\n\n\nRAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions.\n\n\nRetrieval-Augmented Generation (RAG) Full 20000 dataset is a sigle-feature dataset, with each entry containing a \"clear\\_prompt\" field, designed to help build RAG-optimized models. This data consists of 20000 entries, and the data is from Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000.", "#### Languages\n\n\nThe text in the dataset is in English. The associated BCP-47 code is 'en'.\n\n\nDataset Structure\n-----------------", "#### Data Instances\n\n\nA typical data point comprises the \"clear\\_prompt\" field, which is the concatenation of \"context\" (optional), \"question\", and \"answer\" fields. The context is obtained from Falcon RefinedWeb and RAG Hallucination Dataset 1000. The question and answer for each data point are neither obtained by gsm8k nor generated by GPT-4.\n\n\nAn example from the dataset looks like the following:", "#### Data Fields\n\n\n* 'clear\\_prompt': A string consisting of a range of tokens. It includes the \"context (optional)\", \"question\", and \"answer\" fields between \"##CONTEXT##\", \"##QUESTION##\", and \"##ANSWER##\" tags respectively.", "#### Data Splits\n\n\nThe data is split into a training and test set. The split sizes are as follow:\n\n\nTrain: RAG Full 20000, Test: 17433\n\n\nSource Data\n-----------\n\n\nThe data points in the dataset are from the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 datasets.\n\n\nNeural Bridge AI RAG Datasets Index\n-----------------------------------\n\n\n\nLicense\n-------\n\n\nThis public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 ToUs." ]
[ "TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us \n", "#### Dataset Summary\n\n\nRetrieval-Augmented Generation (RAG) enhances large language models (LLMs) by allowing them to consult an external authoritative knowledge base before generating responses. This approach significantly boosts the models' ability to produce relevant, accurate, and context-specific output by extending their capabilities to specialized domains or an organization's internal data, without the need for retraining. RAG offers a cost-effective method to leverage the vast data processing power of LLMs, equipped with billions of parameters, for tasks such as question-answering, language translation, and sentence completion, ensuring that the output is always up-to-date and applicable to various contexts.\n\n\nRAG's importance lies in its potential to address the inherent challenges of LLMs, such as unpredictability in responses, reliance on static and potentially outdated training data, and the risk of disseminating incorrect or non-authoritative information. These issues can negatively affect user trust in AI-powered applications, making RAG's ability to guide LLMs toward authoritative sources for information retrieval invaluable.\n\n\nRAG has multiple benefits, including cost-effective implementation and maintenance, access to current information, improved user trust through accurate information and source attribution, and greater control for developers over the information retrieval process. This approach allows for the dynamic updating of LLMs with the latest research, statistics, or news, directly addressing the challenges of maintaining relevancy and accuracy in rapidly changing knowledge landscapes. 
Additionally, it empowers organizations to deploy generative AI more confidently across a wider range of applications, enhancing both the user experience and the reliability of AI-driven interactions.\n\n\nRetrieval-Augmented Generation (RAG) Full 20000 dataset is a sigle-feature dataset, with each entry containing a \"clear\\_prompt\" field, designed to help build RAG-optimized models. This data consists of 20000 entries, and the data is from Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000.", "#### Languages\n\n\nThe text in the dataset is in English. The associated BCP-47 code is 'en'.\n\n\nDataset Structure\n-----------------", "#### Data Instances\n\n\nA typical data point comprises the \"clear\\_prompt\" field, which is the concatenation of \"context\" (optional), \"question\", and \"answer\" fields. The context is obtained from Falcon RefinedWeb and RAG Hallucination Dataset 1000. The question and answer for each data point are neither obtained by gsm8k nor generated by GPT-4.\n\n\nAn example from the dataset looks like the following:", "#### Data Fields\n\n\n* 'clear\\_prompt': A string consisting of a range of tokens. It includes the \"context (optional)\", \"question\", and \"answer\" fields between \"##CONTEXT##\", \"##QUESTION##\", and \"##ANSWER##\" tags respectively.", "#### Data Splits\n\n\nThe data is split into a training and test set. The split sizes are as follow:\n\n\nTrain: RAG Full 20000, Test: 17433\n\n\nSource Data\n-----------\n\n\nThe data points in the dataset are from the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 datasets.\n\n\nNeural Bridge AI RAG Datasets Index\n-----------------------------------\n\n\n\nLicense\n-------\n\n\nThis public extract is made available under Apache license 2.0. Users should also abide to the Falcon RefinedWeb, gsm8k, and RAG Hallucination Dataset 1000 ToUs." ]
[ 53, 493, 32, 110, 80, 136 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #license-apache-2.0 #retrieval-augmented-generation #region-us \n" ]
c25920668fa754868dd8fae4805e2f341a4205a8
# Dataset Card for "rayquaza-big" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kira/rayquaza-big
[ "region:us" ]
2023-10-02T19:39:50+00:00
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "sys_message", "dtype": "string"}, {"name": "tkn_len", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3493078029.54713, "num_examples": 993983}], "download_size": 1710059593, "dataset_size": 3493078029.54713}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-18T11:18:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rayquaza-big" More Information needed
[ "# Dataset Card for \"rayquaza-big\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rayquaza-big\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rayquaza-big\"\n\nMore Information needed" ]
279f98f7da13450dc7184489be003ed6ca437246
# Dataset Card for "librispeech-portuguese" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pedropauletti/librispeech-portuguese
[ "region:us" ]
2023-10-02T20:21:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "labels", "sequence": {"sequence": "float32"}}, {"name": "speaker_embeddings", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 1448647649.3426037, "num_examples": 4648}, {"name": "test", "num_bytes": 161134000.58307362, "num_examples": 517}], "download_size": 1435028926, "dataset_size": 1609781649.9256773}}
2023-10-02T20:22:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech-portuguese" More Information needed
[ "# Dataset Card for \"librispeech-portuguese\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech-portuguese\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"librispeech-portuguese\"\n\nMore Information needed" ]
a5858691493e40ede94829b9d5193835a31a0834
### Dataset Generation: Initially, we select the Amazon Review Dataset as our base data, referenced from Ni et al. (2019)[^1]. We randomly extract 100,000 instances from this dataset. The original labels in this dataset are ratings, scaled from 1 to 5. For our specific task, we categorize them into Positive (rating > 3), Neutral (rating = 3), and Negative (rating < 3), ensuring a balanced number of instances for each label. To generate the synthetic Code-mixed dataset, we apply two distinct methodologies: the Random Code-mixing Algorithm by Krishnan et al. (2021)[^2] and r-CM by Santy et al. (2021)[^3]. ### Class Distribution: #### For train.csv: | Label | Count | Percentage | |----------|-------|------------| | Negative | 20000 | 33.33% | | Neutral | 20000 | 33.33% | | Positive | 19999 | 33.33% | #### For dev.csv: | Label | Count | Percentage | |----------|-------|------------| | Neutral | 6667 | 33.34% | | Positive | 6667 | 33.34% | | Negative | 6666 | 33.33% | #### For test.csv: | Label | Count | Percentage | |----------|-------|------------| | Negative | 6667 | 33.34% | | Positive | 6667 | 33.34% | | Neutral | 6666 | 33.33% | ### Cite our Paper: If you utilize this dataset, kindly cite our paper. ```bibtex @article{raihan2023mixed, title={Mixed-Distil-BERT: Code-mixed Language Modeling for Bangla, English, and Hindi}, author={Raihan, Md Nishat and Goswami, Dhiman and Mahmud, Antara}, journal={arXiv preprint arXiv:2309.10272}, year={2023} } ``` ### References [^1]: Ni, J., Li, J., & McAuley, J. (2019). Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP) (pp. 188-197). [^2]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. 
arXiv preprint arXiv:2103.07792. [^3]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111-121). ---
md-nishat-008/Code-Mixed-Sentiment-Analysis-Dataset
[ "license:cc-by-nc-nd-4.0", "region:us" ]
2023-10-02T20:25:27+00:00
{"license": "cc-by-nc-nd-4.0"}
2023-10-02T20:27:24+00:00
[]
[]
TAGS #license-cc-by-nc-nd-4.0 #region-us
### Dataset Generation: Initially, we select the Amazon Review Dataset as our base data, referenced from Ni et al. (2019)[^1]. We randomly extract 100,000 instances from this dataset. The original labels in this dataset are ratings, scaled from 1 to 5. For our specific task, we categorize them into Positive (rating > 3), Neutral (rating = 3), and Negative (rating < 3), ensuring a balanced number of instances for each label. To generate the synthetic Code-mixed dataset, we apply two distinct methodologies: the Random Code-mixing Algorithm by Krishnan et al. (2021)[^2] and r-CM by Santy et al. (2021)[^3]. ### Class Distribution: #### For URL: Label: Negative, Count: 20000, Percentage: 33.33% Label: Neutral, Count: 20000, Percentage: 33.33% Label: Positive, Count: 19999, Percentage: 33.33% #### For URL: Label: Neutral, Count: 6667, Percentage: 33.34% Label: Positive, Count: 6667, Percentage: 33.34% Label: Negative, Count: 6666, Percentage: 33.33% #### For URL: Label: Negative, Count: 6667, Percentage: 33.34% Label: Positive, Count: 6667, Percentage: 33.34% Label: Neutral, Count: 6666, Percentage: 33.33% ### Cite our Paper: If you utilize this dataset, kindly cite our paper. ### References [^1]: Ni, J., Li, J., & McAuley, J. (2019). Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP) (pp. 188-197). [^2]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792. [^3]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111-121). ---
[ "### Dataset Generation:\n\n\nInitially, we select the Amazon Review Dataset as our base data, referenced from Ni et al. (2019)[^1]. We randomly extract 100,000 instances from this dataset. The original labels in this dataset are ratings, scaled from 1 to 5. For our specific task, we categorize them into Positive (rating > 3), Neutral (rating = 3), and Negative (rating < 3), ensuring a balanced number of instances for each label. To generate the synthetic Code-mixed dataset, we apply two distinct methodologies: the Random Code-mixing Algorithm by Krishnan et al. (2021)[^2] and r-CM by Santy et al. (2021)[^3].", "### Class Distribution:", "#### For URL:\n\n\nLabel: Negative, Count: 20000, Percentage: 33.33%\nLabel: Neutral, Count: 20000, Percentage: 33.33%\nLabel: Positive, Count: 19999, Percentage: 33.33%", "#### For URL:\n\n\nLabel: Neutral, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Negative, Count: 6666, Percentage: 33.33%", "#### For URL:\n\n\nLabel: Negative, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Neutral, Count: 6666, Percentage: 33.33%", "### Cite our Paper:\n\n\nIf you utilize this dataset, kindly cite our paper.", "### References\n\n\n[^1]: Ni, J., Li, J., & McAuley, J. (2019). Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP) (pp. 188-197).\n\n\n[^2]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792.\n\n\n[^3]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 
111-121).\n\n\n\n\n---" ]
[ "TAGS\n#license-cc-by-nc-nd-4.0 #region-us \n", "### Dataset Generation:\n\n\nInitially, we select the Amazon Review Dataset as our base data, referenced from Ni et al. (2019)[^1]. We randomly extract 100,000 instances from this dataset. The original labels in this dataset are ratings, scaled from 1 to 5. For our specific task, we categorize them into Positive (rating > 3), Neutral (rating = 3), and Negative (rating < 3), ensuring a balanced number of instances for each label. To generate the synthetic Code-mixed dataset, we apply two distinct methodologies: the Random Code-mixing Algorithm by Krishnan et al. (2021)[^2] and r-CM by Santy et al. (2021)[^3].", "### Class Distribution:", "#### For URL:\n\n\nLabel: Negative, Count: 20000, Percentage: 33.33%\nLabel: Neutral, Count: 20000, Percentage: 33.33%\nLabel: Positive, Count: 19999, Percentage: 33.33%", "#### For URL:\n\n\nLabel: Neutral, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Negative, Count: 6666, Percentage: 33.33%", "#### For URL:\n\n\nLabel: Negative, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Neutral, Count: 6666, Percentage: 33.33%", "### Cite our Paper:\n\n\nIf you utilize this dataset, kindly cite our paper.", "### References\n\n\n[^1]: Ni, J., Li, J., & McAuley, J. (2019). Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP) (pp. 188-197).\n\n\n[^2]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792.\n\n\n[^3]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? 
In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111-121).\n\n\n\n\n---" ]
[ 19, 172, 6, 59, 59, 59, 20, 232 ]
[ "passage: TAGS\n#license-cc-by-nc-nd-4.0 #region-us \n### Dataset Generation:\n\n\nInitially, we select the Amazon Review Dataset as our base data, referenced from Ni et al. (2019)[^1]. We randomly extract 100,000 instances from this dataset. The original labels in this dataset are ratings, scaled from 1 to 5. For our specific task, we categorize them into Positive (rating > 3), Neutral (rating = 3), and Negative (rating < 3), ensuring a balanced number of instances for each label. To generate the synthetic Code-mixed dataset, we apply two distinct methodologies: the Random Code-mixing Algorithm by Krishnan et al. (2021)[^2] and r-CM by Santy et al. (2021)[^3].### Class Distribution:#### For URL:\n\n\nLabel: Negative, Count: 20000, Percentage: 33.33%\nLabel: Neutral, Count: 20000, Percentage: 33.33%\nLabel: Positive, Count: 19999, Percentage: 33.33%#### For URL:\n\n\nLabel: Neutral, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Negative, Count: 6666, Percentage: 33.33%#### For URL:\n\n\nLabel: Negative, Count: 6667, Percentage: 33.34%\nLabel: Positive, Count: 6667, Percentage: 33.34%\nLabel: Neutral, Count: 6666, Percentage: 33.33%### Cite our Paper:\n\n\nIf you utilize this dataset, kindly cite our paper." ]
0e3666410445d77aca56381f5bd669a26defdc2a
# Dataset Card for "sales-conversations-instruction-customer" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
goendalf666/sales-conversations-instruction-customer
[ "region:us" ]
2023-10-02T20:59:21+00:00
{"dataset_info": {"features": [{"name": "0", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21867656, "num_examples": 20927}], "download_size": 3900514, "dataset_size": 21867656}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T20:59:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sales-conversations-instruction-customer" More Information needed
[ "# Dataset Card for \"sales-conversations-instruction-customer\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sales-conversations-instruction-customer\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sales-conversations-instruction-customer\"\n\nMore Information needed" ]
81dd197086df4fa1ec7477924d25749400c1ca95
# Code-Mixed-Offensive-Language-Identification This is a dataset for the offensive language detection task. It contains 100k code mixed data. The languages are Bangla-English-Hindi. ### Dataset Generation: Initially, the labelling schema of OLID[^1] and SOLID[^2] serves as the seed data, from which we randomly select 100,000 data instances. The labels in this dataset are categorized as Non-Offensive and Offensive for the purpose of our task. We meticulously ensure an equal number of instances for both Non-Offensive and Offensive labels. To synthesize the Code-mixed dataset, we employ two distinct methodologies: the *Random Code-mixing Algorithm* by Krishnan et al. (2021)[^3] and *r-CM* by Santy et al. (2021)[^4]. ### Class Distribution: #### For train.csv: | Label | Count | Percentage | |-------|-------|------------| | NOT | 40018 | 66.70% | | OFF | 19982 | 33.30% | #### For dev.csv: | Label | Count | Percentage | |-------|-------|------------| | NOT | 13339 | 66.70% | | OFF | 6661 | 33.30% | #### For test.csv: | Label | Count | Percentage | |-------|-------|------------| | NOT | 13340 | 66.70% | | OFF | 6660 | 33.30% | ### Cite our Paper: If you utilize this dataset, please cite our paper. ```bibtex @article{raihan2023mixed, title={Mixed-Distil-BERT: Code-mixed Language Modeling for Bangla, English, and Hindi}, author={Raihan, Md Nishat and Goswami, Dhiman and Mahmud, Antara}, journal={arXiv preprint arXiv:2309.10272}, year={2023} } ``` ### References [^1]: Zampieri, M., Malmasi, S., Nakov, P., Rosenthal, S., Farra, N., & Kumar, R. (2019). SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval). In Proceedings of the 13th International Workshop on Semantic Evaluation (pp. 75–86). [https://aclanthology.org/S19-2010](https://aclanthology.org/S19-2010) [^2]: Rosenthal, S., Atanasova, P., Karadzhov, G., Zampieri, M., & Nakov, P. (2021). SOLID: A Large-Scale Semi-Supervised Dataset for Offensive Language Identification. 
In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021 (pp. 915–928). [https://aclanthology.org/2021.findings-acl.80](https://aclanthology.org/2021.findings-acl.80) [^3]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792. [^4]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111–121). ---
md-nishat-008/Code-Mixed-Offensive-Language-Detection-Dataset
[ "license:cc-by-nc-nd-4.0", "region:us" ]
2023-10-02T21:03:41+00:00
{"license": "cc-by-nc-nd-4.0"}
2023-10-02T21:05:01+00:00
[]
[]
TAGS #license-cc-by-nc-nd-4.0 #region-us
Code-Mixed-Offensive-Language-Identification ============================================ This is a dataset for the offensive language detection task. It contains 100k code mixed data. The languages are Bangla-English-Hindi. ### Dataset Generation: Initially, the labelling schema of OLID[^1] and SOLID[^2] serves as the seed data, from which we randomly select 100,000 data instances. The labels in this dataset are categorized as Non-Offensive and Offensive for the purpose of our task. We meticulously ensure an equal number of instances for both Non-Offensive and Offensive labels. To synthesize the Code-mixed dataset, we employ two distinct methodologies: the *Random Code-mixing Algorithm* by Krishnan et al. (2021)[^3] and *r-CM* by Santy et al. (2021)[^4]. ### Class Distribution: #### For URL: Label: NOT, Count: 40018, Percentage: 66.70% Label: OFF, Count: 19982, Percentage: 33.30% #### For URL: Label: NOT, Count: 13339, Percentage: 66.70% Label: OFF, Count: 6661, Percentage: 33.30% #### For URL: Label: NOT, Count: 13340, Percentage: 66.70% Label: OFF, Count: 6660, Percentage: 33.30% ### Cite our Paper: If you utilize this dataset, please cite our paper. ### References [^1]: Zampieri, M., Malmasi, S., Nakov, P., Rosenthal, S., Farra, N., & Kumar, R. (2019). SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval). In Proceedings of the 13th International Workshop on Semantic Evaluation (pp. 75–86). URL [^2]: Rosenthal, S., Atanasova, P., Karadzhov, G., Zampieri, M., & Nakov, P. (2021). SOLID: A Large-Scale Semi-Supervised Dataset for Offensive Language Identification. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021 (pp. 915–928). URL [^3]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792. [^4]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). 
BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111–121). ---
[ "### Dataset Generation:\n\n\nInitially, the labelling schema of OLID[^1] and SOLID[^2] serves as the seed data, from which we randomly select 100,000 data instances. The labels in this dataset are categorized as Non-Offensive and Offensive for the purpose of our task. We meticulously ensure an equal number of instances for both Non-Offensive and Offensive labels. To synthesize the Code-mixed dataset, we employ two distinct methodologies: the *Random Code-mixing Algorithm* by Krishnan et al. (2021)[^3] and *r-CM* by Santy et al. (2021)[^4].", "### Class Distribution:", "#### For URL:\n\n\nLabel: NOT, Count: 40018, Percentage: 66.70%\nLabel: OFF, Count: 19982, Percentage: 33.30%", "#### For URL:\n\n\nLabel: NOT, Count: 13339, Percentage: 66.70%\nLabel: OFF, Count: 6661, Percentage: 33.30%", "#### For URL:\n\n\nLabel: NOT, Count: 13340, Percentage: 66.70%\nLabel: OFF, Count: 6660, Percentage: 33.30%", "### Cite our Paper:\n\n\nIf you utilize this dataset, please cite our paper.", "### References\n\n\n[^1]: Zampieri, M., Malmasi, S., Nakov, P., Rosenthal, S., Farra, N., & Kumar, R. (2019). SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval). In Proceedings of the 13th International Workshop on Semantic Evaluation (pp. 75–86). URL\n\n\n[^2]: Rosenthal, S., Atanasova, P., Karadzhov, G., Zampieri, M., & Nakov, P. (2021). SOLID: A Large-Scale Semi-Supervised Dataset for Offensive Language Identification. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021 (pp. 915–928). URL\n\n\n[^3]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792.\n\n\n[^4]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 
111–121).\n\n\n\n\n---" ]
[ "TAGS\n#license-cc-by-nc-nd-4.0 #region-us \n", "### Dataset Generation:\n\n\nInitially, the labelling schema of OLID[^1] and SOLID[^2] serves as the seed data, from which we randomly select 100,000 data instances. The labels in this dataset are categorized as Non-Offensive and Offensive for the purpose of our task. We meticulously ensure an equal number of instances for both Non-Offensive and Offensive labels. To synthesize the Code-mixed dataset, we employ two distinct methodologies: the *Random Code-mixing Algorithm* by Krishnan et al. (2021)[^3] and *r-CM* by Santy et al. (2021)[^4].", "### Class Distribution:", "#### For URL:\n\n\nLabel: NOT, Count: 40018, Percentage: 66.70%\nLabel: OFF, Count: 19982, Percentage: 33.30%", "#### For URL:\n\n\nLabel: NOT, Count: 13339, Percentage: 66.70%\nLabel: OFF, Count: 6661, Percentage: 33.30%", "#### For URL:\n\n\nLabel: NOT, Count: 13340, Percentage: 66.70%\nLabel: OFF, Count: 6660, Percentage: 33.30%", "### Cite our Paper:\n\n\nIf you utilize this dataset, please cite our paper.", "### References\n\n\n[^1]: Zampieri, M., Malmasi, S., Nakov, P., Rosenthal, S., Farra, N., & Kumar, R. (2019). SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval). In Proceedings of the 13th International Workshop on Semantic Evaluation (pp. 75–86). URL\n\n\n[^2]: Rosenthal, S., Atanasova, P., Karadzhov, G., Zampieri, M., & Nakov, P. (2021). SOLID: A Large-Scale Semi-Supervised Dataset for Offensive Language Identification. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021 (pp. 915–928). URL\n\n\n[^3]: Krishnan, J., Anastasopoulos, A., Purohit, H., & Rangwala, H. (2021). Multilingual code-switching for zero-shot cross-lingual intent prediction and slot filling. arXiv preprint arXiv:2103.07792.\n\n\n[^4]: Santy, S., Srinivasan, A., & Choudhury, M. (2021). BERTologiCoMix: How does code-mixing interact with multilingual BERT? 
In Proceedings of the Second Workshop on Domain Adaptation for NLP (pp. 111–121).\n\n\n\n\n---" ]
[ 19, 161, 6, 38, 38, 38, 19, 335 ]
[ "passage: TAGS\n#license-cc-by-nc-nd-4.0 #region-us \n### Dataset Generation:\n\n\nInitially, the labelling schema of OLID[^1] and SOLID[^2] serves as the seed data, from which we randomly select 100,000 data instances. The labels in this dataset are categorized as Non-Offensive and Offensive for the purpose of our task. We meticulously ensure an equal number of instances for both Non-Offensive and Offensive labels. To synthesize the Code-mixed dataset, we employ two distinct methodologies: the *Random Code-mixing Algorithm* by Krishnan et al. (2021)[^3] and *r-CM* by Santy et al. (2021)[^4].### Class Distribution:#### For URL:\n\n\nLabel: NOT, Count: 40018, Percentage: 66.70%\nLabel: OFF, Count: 19982, Percentage: 33.30%#### For URL:\n\n\nLabel: NOT, Count: 13339, Percentage: 66.70%\nLabel: OFF, Count: 6661, Percentage: 33.30%#### For URL:\n\n\nLabel: NOT, Count: 13340, Percentage: 66.70%\nLabel: OFF, Count: 6660, Percentage: 33.30%### Cite our Paper:\n\n\nIf you utilize this dataset, please cite our paper." ]
3636077c73c46a484b0dc25dde7f3afaee8a1202
# Dataset Card for "prompt-enhance-photoreal" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
facet/prompt-enhance-photoreal
[ "region:us" ]
2023-10-02T22:08:20+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 212821, "num_examples": 1135}], "download_size": 54505, "dataset_size": 212821}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-02T22:48:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "prompt-enhance-photoreal" More Information needed
[ "# Dataset Card for \"prompt-enhance-photoreal\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"prompt-enhance-photoreal\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"prompt-enhance-photoreal\"\n\nMore Information needed" ]
5e3c3ede939a7c6c926471f86fce802ee9814c08
# Dataset Card for "govreport-qa-5-16384" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shossain/govreport-qa-5-16384
[ "region:us" ]
2023-10-02T22:51:42+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 500027, "num_examples": 5}], "download_size": 129870, "dataset_size": 500027}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T20:20:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "govreport-qa-5-16384" More Information needed
[ "# Dataset Card for \"govreport-qa-5-16384\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"govreport-qa-5-16384\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"govreport-qa-5-16384\"\n\nMore Information needed" ]
3f4216183b4b4fb347951e9b7075c7f7aa931ecd
# Dataset Card for "govreport-qa-5-8192" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shossain/govreport-qa-5-8192
[ "region:us" ]
2023-10-02T22:53:30+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 410925, "num_examples": 5}], "download_size": 110024, "dataset_size": 410925}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T20:26:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "govreport-qa-5-8192" More Information needed
[ "# Dataset Card for \"govreport-qa-5-8192\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"govreport-qa-5-8192\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"govreport-qa-5-8192\"\n\nMore Information needed" ]
1997a186bfdbce803c8c5075d14ed9bf154affd5
# Dataset Card for "generated-test-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
inesgoddi/generated-test-dataset
[ "region:us" ]
2023-10-02T22:56:09+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_expert_dna", "path": "data/train_expert_dna-*"}, {"split": "test_expert_dna", "path": "data/test_expert_dna-*"}]}], "dataset_info": {"features": [{"name": "expert-dna", "dtype": "string"}], "splits": [{"name": "train_expert_dna", "num_bytes": 98601.3, "num_examples": 90}, {"name": "test_expert_dna", "num_bytes": 10955.7, "num_examples": 10}], "download_size": 10755, "dataset_size": 109557.0}}
2023-11-07T12:06:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "generated-test-dataset" More Information needed
[ "# Dataset Card for \"generated-test-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"generated-test-dataset\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"generated-test-dataset\"\n\nMore Information needed" ]
2f48e4c2ee5c9a5cd152faa17055464a43382c03
Second version of the synthetic dataset created by putting a part of a textbook in the context of 7B model and then asking the model to create a few questions and answers related to the dataset. It contains information about PowerShell basics, Office 365 basics and Active Directory/GPO basics.
adamo1139/PS_AD_Office365_02
[ "license:apache-2.0", "region:us" ]
2023-10-02T23:10:11+00:00
{"license": "apache-2.0"}
2023-10-02T23:13:00+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
Second version of the synthetic dataset created by putting a part of a textbook in the context of 7B model and then asking the model to create a few questions and answers related to the dataset. It contains information about PowerShell basics, Office 365 basics and Active Directory/GPO basics.
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
ba05a6a9153e02bddc0607674f2be33a53ecd5d1
# Dataset Card for "medtrain_flashcards" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
katielink/medtrain_flashcards
[ "region:us" ]
2023-10-03T00:14:44+00:00
{"dataset_info": {"features": [{"name": "raw_flashcards", "dtype": "string"}, {"name": "clean_flashcards", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 525107, "num_examples": 2200}], "download_size": 253892, "dataset_size": 525107}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T01:15:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medtrain_flashcards" More Information needed
[ "# Dataset Card for \"medtrain_flashcards\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medtrain_flashcards\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medtrain_flashcards\"\n\nMore Information needed" ]
1f7b71ca9685adfdaad95f6da7e75c6bd04d25b6
# Dataset Card for "e8491cc1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/e8491cc1
[ "region:us" ]
2023-10-03T00:18:56+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 168, "num_examples": 10}], "download_size": 1314, "dataset_size": 168}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T00:18:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "e8491cc1" More Information needed
[ "# Dataset Card for \"e8491cc1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"e8491cc1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"e8491cc1\"\n\nMore Information needed" ]
fdadc3dbdc3735c8ae4fda467f08cbd75ecec02c
# Dataset Card for "78fe0016" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/78fe0016
[ "region:us" ]
2023-10-03T00:21:51+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 173, "num_examples": 10}], "download_size": 1317, "dataset_size": 173}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T00:21:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "78fe0016" More Information needed
[ "# Dataset Card for \"78fe0016\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"78fe0016\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"78fe0016\"\n\nMore Information needed" ]
a5664f982340c4dab348ca4218e1f0d6af1ca460
# Bangumi Image Base of Toradora! This is the image base of bangumi Toradora!, we detected 33 characters, 3929 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 1527 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 45 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 26 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 27 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | 
![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 73 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 83 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 31 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 16 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 67 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 313 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 49 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 22 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 19 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 36 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 34 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 54 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 53 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 780 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 19 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 10 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 21 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 31 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 11 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 14 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | 
![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 13 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 10 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 14 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 212 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 7 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | N/A | | 29 | 16 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) 
| | 30 | 15 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 14 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | noise | 267 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/toradora
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-03T00:34:28+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-03T02:21:53+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Toradora! =============================== This is the image base of bangumi Toradora!, we detected 33 characters, 3929 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
99df58a4b3d3753c419ee59f1e9947a1b5355876
# Bangumi Image Base of Macross Delta This is the image base of bangumi Macross Delta, we detected 45 characters, 4504 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 33 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 43 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 14 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 16 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | 
![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 170 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 12 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 13 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 55 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 52 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 93 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 33 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 131 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 17 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 13 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 147 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 187 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 657 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 11 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 65 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 31 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 41 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 26 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 275 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 276 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | 
![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 156 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 16 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 9 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 9 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 9 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 208 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | 
![preview 8](29/preview_8.png) | | 30 | 22 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 18 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 96 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 14 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 596 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 58 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 28 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 
3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 170 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 6 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | N/A | N/A | | 39 | 8 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 6 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | N/A | N/A | | 41 | 180 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 30 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 6 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 
2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | N/A | N/A | | noise | 448 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/macrossdelta
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-03T00:34:48+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-03T02:17:31+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Macross Delta =================================== This is the image base of bangumi Macross Delta, we detected 45 characters, 4504 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
6c1454871bdc73728713da14d24c9cae296eef05
# Dataset Card for "60k_data_multichoice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BaorBaor/60k_data_multichoice
[ "region:us" ]
2023-10-03T00:38:52+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "A", "dtype": "string"}, {"name": "B", "dtype": "string"}, {"name": "C", "dtype": "string"}, {"name": "D", "dtype": "string"}, {"name": "E", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 330281409, "num_examples": 60347}, {"name": "valid", "num_bytes": 1112116, "num_examples": 200}], "download_size": 183246252, "dataset_size": 331393525}}
2023-10-03T02:03:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "60k_data_multichoice" More Information needed
[ "# Dataset Card for \"60k_data_multichoice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"60k_data_multichoice\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"60k_data_multichoice\"\n\nMore Information needed" ]
df7dedca8bf437ae7bdc45fce65f9a8439246b30
# Bangumi Image Base of Akame Ga Kill! This is the image base of bangumi Akame ga Kill!, we detected 40 characters, 2411 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 441 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 121 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 40 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 97 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 
4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 258 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 6 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | N/A | N/A | | 6 | 16 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 56 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 118 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 7 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | N/A | | 10 | 38 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 
4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 11 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 33 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 30 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 18 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 10 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 17 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 142 | 
[Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 23 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 43 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 20 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 26 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 102 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 34 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | 
![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 22 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 13 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 10 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 117 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 7 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | N/A | | 29 | 8 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 20 | [Download](30/dataset.zip) | ![preview 
1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 115 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 42 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 44 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 15 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 13 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 9 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 
6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 5 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | N/A | N/A | N/A | | 38 | 13 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | noise | 251 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/akamegakill
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-03T00:55:07+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-03T02:19:06+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Akame Ga Kill! ==================================== This is the image base of bangumi Akame ga Kill!, we detected 40 characters, 2411 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
3e4d9eee3dd314f61b0a275e1849ef5564e3c32b
credits: shoutout @vikp for his textbook_quality GH repo this was created with dataset info: a bunch of bad boy data for Machiavellian LLMs
karan4d/instruct_machiavellian_textbooks
[ "license:apache-2.0", "region:us" ]
2023-10-03T01:20:08+00:00
{"license": "apache-2.0"}
2023-10-03T15:30:54+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
credits: shoutout @vikp for his textbook_quality GH repo this was created with dataset info: a bunch of bad boy data for Machiavellian LLMs
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
61d0081637462a67ccb5bbd7ae76de4806dca22e
# Dataset Card for "guanaco-llama2-1k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ethan615/guanaco-llama2-1k
[ "region:us" ]
2023-10-03T01:49:06+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1654448, "num_examples": 1000}], "download_size": 966693, "dataset_size": 1654448}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T01:49:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "guanaco-llama2-1k" More Information needed
[ "# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
bbcb136a74d60d7087275ca57d6f14578d164f4f
## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.6.0.dev0 - PEFT 0.6.0.dev0
mooklife/finetune
[ "region:us" ]
2023-10-03T02:08:55+00:00
{"library_name": "peft"}
2023-10-03T02:34:53+00:00
[]
[]
TAGS #region-us
## Training procedure The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.6.0.dev0 - PEFT 0.6.0.dev0
[ "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16", "### Framework versions\n\n- PEFT 0.6.0.dev0\n\n- PEFT 0.6.0.dev0" ]
[ "TAGS\n#region-us \n", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16", "### Framework versions\n\n- PEFT 0.6.0.dev0\n\n- PEFT 0.6.0.dev0" ]
[ 6, 325, 25 ]
[ "passage: TAGS\n#region-us \n## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16### Framework versions\n\n- PEFT 0.6.0.dev0\n\n- PEFT 0.6.0.dev0" ]
7b2d4e8b9c6be751baf757d20c295a0c8a651a52
# Dataset Card for "my_dataset_01" This is a dataset for captioning graph images [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
harinarayan/my_dataset_01
[ "region:us" ]
2023-10-03T02:16:06+00:00
{"dataset_info": {"features": [{"name": "image_file", "dtype": "string"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2422, "num_examples": 20}], "download_size": 2850, "dataset_size": 2422}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T03:15:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "my_dataset_01" This is a dataset for captioning graph images More Information needed
[ "# Dataset Card for \"my_dataset_01\"\nThis is a dataset for captioning graph images\n\n \nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"my_dataset_01\"\nThis is a dataset for captioning graph images\n\n \nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"my_dataset_01\"\nThis is a dataset for captioning graph images\n\n \nMore Information needed" ]
a6400692b91aeed86c26a8750edda64a0dec1386
# Bangumi Image Base of Joshikousei No Mudazukai This is the image base of bangumi Joshikousei no Mudazukai, we detected 23 characters, 1598 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 202 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 99 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 11 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 19 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | 
![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 41 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 74 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 271 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 10 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 22 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 7 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | N/A | | 10 | 11 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 190 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 33 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 79 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 12 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 110 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 14 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 86 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 147 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 6 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | N/A | N/A | | 20 | 5 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | N/A | N/A | N/A | | 21 | 6 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | N/A | N/A | | noise | 143 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/joshikouseinomudazukai
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-03T02:20:13+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-03T03:21:07+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Joshikousei No Mudazukai ============================================== This is the image base of bangumi Joshikousei no Mudazukai, we detected 23 characters, 1598 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
b297d4fb6f9e6783c568cfa2754ffa02b454084b
# Dataset Card for "test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cuongnl/test
[ "region:us" ]
2023-10-03T02:45:55+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "deer"}}}}], "splits": [{"name": "train", "num_bytes": 28405705.68, "num_examples": 56}, {"name": "test", "num_bytes": 10075058.57, "num_examples": 19}], "download_size": 38485702, "dataset_size": 38480764.25}}
2023-10-03T02:46:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test" More Information needed
[ "# Dataset Card for \"test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test\"\n\nMore Information needed" ]
[ 6, 11 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test\"\n\nMore Information needed" ]
0c47ca7d9e9b2b3fc7238462a54b38ea47966a31
# Dataset Card for "azaria-mitchell-diff-filtered" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
notrichardren/azaria-mitchell-diff-filtered
[ "region:us" ]
2023-10-03T02:54:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "cities", "path": "data/cities-*"}, {"split": "companies", "path": "data/companies-*"}, {"split": "animals", "path": "data/animals-*"}, {"split": "elements", "path": "data/elements-*"}, {"split": "inventions", "path": "data/inventions-*"}, {"split": "facts", "path": "data/facts-*"}]}], "dataset_info": {"features": [{"name": "claim", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "dataset", "dtype": "string"}, {"name": "qa_type", "dtype": "int64"}, {"name": "ind", "dtype": "int64"}], "splits": [{"name": "cities", "num_bytes": 7955, "num_examples": 112}, {"name": "companies", "num_bytes": 14588, "num_examples": 129}, {"name": "animals", "num_bytes": 11451, "num_examples": 137}, {"name": "elements", "num_bytes": 11617, "num_examples": 139}, {"name": "inventions", "num_bytes": 10559, "num_examples": 127}, {"name": "facts", "num_bytes": 14809, "num_examples": 159}], "download_size": 44699, "dataset_size": 70979}}
2023-10-03T03:33:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "azaria-mitchell-diff-filtered" More Information needed
[ "# Dataset Card for \"azaria-mitchell-diff-filtered\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"azaria-mitchell-diff-filtered\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"azaria-mitchell-diff-filtered\"\n\nMore Information needed" ]
8115f49f1b3abe69a3e2eeab1a3c31ef9b838145
# Dataset Card for ExpertQA ## Dataset Description - **Repository: https://github.com/chaitanyamalaviya/ExpertQA** - **Paper: https://arxiv.org/pdf/2309.07852** - **Point of Contact: [email protected]** ### Dataset Summary We provide here the data accompanying the paper: [ExpertQA: Expert-Curated Questions and Attributed Answers](https://arxiv.org/pdf/2309.07852). The ExpertQA dataset contains 2177 examples from 32 different fields. ### Supported Tasks The `main` data contains 2177 examples that can be used to evaluate new methods for estimating factuality and attribution, while the `lfqa_domain` and `lfqa_rand` data can be used to evaluate long-form question answering systems. ## Dataset Creation ### Curation Rationale ExpertQA was created to evaluate factuality & attribution in language model responses to domain-specific questions, as well as evaluate long-form question answering in domain-specific settings. ### Annotation Process Questions in ExpertQA were formulated by experts spanning 32 fields. The answers to these questions are expert-verified, model-generated answers to these questions. Each claim-evidence pair in an answer is judged by experts for various properties such as the claim’s informativeness, factuality, citeworthiness, whether the claim is supported by the evidence, and reliability of the evidence source. Further, experts revise the original claims to ensure they are factual and supported by trustworthy sources. ## Dataset Structure ### Data Instances We provide the main data, with judgements of factuality and attribution, under the `default` subset. The long-form QA data splits are provided at `lfqa_domain` (domain split) and `lfqa_rand` (random split). Additional files are provided in our [GitHub repo](https://github.com/chaitanyamalaviya/ExpertQA). ### Data Fields The main data file contains newline-separated json dictionaries with the following fields: * `question` - Question written by an expert. 
* `annotator_id` - Anonymized annotator ID of the author of the question. * `answers` - Dict mapping model names to an Answer object. The model names can be one of `{gpt4, bing_chat, rr_sphere_gpt4, rr_gs_gpt4, post_hoc_sphere_gpt4, post_hoc_gs_gpt4}`. * `metadata` - A dictionary with the following fields: * `question_type` - The question type(s) separated by "|". * `field` - The field to which the annotator belonged. * `specific_field` - More specific field name within the broader field. Each Answer object contains the following fields: * `answer_string`: The answer string. * `attribution`: List of evidences for the answer (not linked to specific claims). Note that these are only URLs, the evidence passages are stored in the Claim object -- see below. * `claims`: List of Claim objects for the answer. * `revised_answer_string`: Revised answer by annotator. * `usefulness`: Usefulness of original answer marked by annotator. * `annotation_time`: Time taken for annotating this answer. * `annotator_id`: Anonymized annotator ID of the person who validated this answer. Each Claim object contains the following fields: * `claim_string`: Original claim string. * `evidence`: List of evidences for the claim (URL+passage or URL). * `support`: Attribution marked by annotator. * `reason_missing_support`: Reason for missing support specified by annotator. * `informativeness`: Informativeness of claim for the question, marked by annotator. * `worthiness`: Worthiness of citing claim marked by annotator. * `correctness`: Factual correctness of claim marked by annotator. * `reliability`: Reliability of source evidence marked by annotator. * `revised_claim`: Revised claim by annotator. * `revised_evidence`: Revised evidence by annotator. 
### Citation Information ``` @inproceedings{malaviya23expertqa, title = {ExpertQA: Expert-Curated Questions and Attributed Answers}, author = {Chaitanya Malaviya and Subin Lee and Sihao Chen and Elizabeth Sieber and Mark Yatskar and Dan Roth}, booktitle = {arXiv}, month = {September}, year = {2023}, url = "https://arxiv.org/abs/2309.07852" } ```
cmalaviya/expertqa
[ "task_categories:question-answering", "annotations_creators:expert-generated", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:mit", "arxiv:2309.07852", "region:us" ]
2023-10-03T03:02:09+00:00
{"annotations_creators": ["expert-generated"], "language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["question-answering"], "pretty_name": "ExpertQA", "configs": [{"config_name": "main", "data_files": "r2_compiled_anon_fixed.jsonl"}, {"config_name": "lfqa_random", "data_files": [{"split": "train", "path": "rand_lfqa_train.json"}, {"split": "test", "path": "rand_lfqa_test.json"}, {"split": "validation", "path": "rand_lfqa_val.json"}]}, {"config_name": "lfqa_domain", "data_files": [{"split": "train", "path": "domain_lfqa_train.json"}, {"split": "test", "path": "domain_lfqa_test.json"}, {"split": "validation", "path": "domain_lfqa_val.json"}]}]}
2023-10-07T04:07:10+00:00
[ "2309.07852" ]
[ "en" ]
TAGS #task_categories-question-answering #annotations_creators-expert-generated #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #arxiv-2309.07852 #region-us
# Dataset Card for ExpertQA ## Dataset Description - Repository: URL - Paper: URL - Point of Contact: chaitanyamalaviya@URL ### Dataset Summary We provide here the data accompanying the paper: ExpertQA: Expert-Curated Questions and Attributed Answers. The ExpertQA dataset contains 2177 examples from 32 different fields. ### Supported Tasks The 'main' data contains 2177 examples that can be used to evaluate new methods for estimating factuality and attribution, while the 'lfqa_domain' and 'lfqa_rand' data can be used to evaluate long-form question answering systems. ## Dataset Creation ### Curation Rationale ExpertQA was created to evaluate factuality & attribution in language model responses to domain-specific questions, as well as evaluate long-form question answering in domain-specific settings. ### Annotation Process Questions in ExpertQA were formulated by experts spanning 32 fields. The answers to these questions are expert-verified, model-generated answers to these questions. Each claim-evidence pair in an answer is judged by experts for various properties such as the claim’s informativeness, factuality, citeworthiness, whether the claim is supported by the evidence, and reliability of the evidence source. Further, experts revise the original claims to ensure they are factual and supported by trustworthy sources. ## Dataset Structure ### Data Instances We provide the main data, with judgements of factuality and attribution, under the 'default' subset. The long-form QA data splits are provided at 'lfqa_domain' (domain split) and 'lfqa_rand' (random split). Additional files are provided in our GitHub repo. ### Data Fields The main data file contains newline-separated json dictionaries with the following fields: * 'question' - Question written by an expert. * 'annotator_id' - Anonymized annotator ID of the author of the question. * 'answers' - Dict mapping model names to an Answer object. 
The model names can be one of '{gpt4, bing_chat, rr_sphere_gpt4, rr_gs_gpt4, post_hoc_sphere_gpt4, post_hoc_gs_gpt4}'. * 'metadata' - A dictionary with the following fields: * 'question_type' - The question type(s) separated by "|". * 'field' - The field to which the annotator belonged. * 'specific_field' - More specific field name within the broader field. Each Answer object contains the following fields: * 'answer_string': The answer string. * 'attribution': List of evidences for the answer (not linked to specific claims). Note that these are only URLs, the evidence passages are stored in the Claim object -- see below. * 'claims': List of Claim objects for the answer. * 'revised_answer_string': Revised answer by annotator. * 'usefulness': Usefulness of original answer marked by annotator. * 'annotation_time': Time taken for annotating this answer. * 'annotator_id': Anonymized annotator ID of the person who validated this answer. Each Claim object contains the following fields: * 'claim_string': Original claim string. * 'evidence': List of evidences for the claim (URL+passage or URL). * 'support': Attribution marked by annotator. * 'reason_missing_support': Reason for missing support specified by annotator. * 'informativeness': Informativeness of claim for the question, marked by annotator. * 'worthiness': Worthiness of citing claim marked by annotator. * 'correctness': Factual correctness of claim marked by annotator. * 'reliability': Reliability of source evidence marked by annotator. * 'revised_claim': Revised claim by annotator. * 'revised_evidence': Revised evidence by annotator.
[ "# Dataset Card for ExpertQA", "## Dataset Description\n\n- Repository: URL \n- Paper: URL \n- Point of Contact: chaitanyamalaviya@URL", "### Dataset Summary\n\nWe provide here the data accompanying the paper: ExpertQA: Expert-Curated Questions and Attributed Answers. The ExpertQA dataset contains 2177 examples from 32 different fields.", "### Supported Tasks\n\nThe 'main' data contains 2177 examples that can be used to evaluate new methods for estimating factuality and attribution, while the 'lfqa_domain' and 'lfqa_rand' data can be used to evaluate long-form question answering systems.", "## Dataset Creation", "### Curation Rationale\n\nExpertQA was created to evaluate factuality & attribution in language model responses to domain-specific questions, as well as evaluate long-form question answering in domain-specific settings.", "### Annotation Process\n\nQuestions in ExpertQA were formulated by experts spanning 32 fields. The answers to these questions are expert-verified, model-generated answers to these questions. Each claim-evidence pair in an answer is judged by experts for various properties such as the claim’s informativeness, factuality, citeworthiness, whether the claim is supported by the evidence, and reliability of the evidence source. Further, experts revise the original claims to ensure they are factual and supported by trustworthy sources.", "## Dataset Structure", "### Data Instances\n\nWe provide the main data, with judgements of factuality and attribution, under the 'default' subset. \nThe long-form QA data splits are provided at 'lfqa_domain' (domain split) and 'lfqa_rand' (random split).\nAdditional files are provided in our GitHub repo.", "### Data Fields\n\nThe main data file contains newline-separated json dictionaries with the following fields:\n* 'question' - Question written by an expert.\n* 'annotator_id' - Anonymized annotator ID of the author of the question.\n* 'answers' - Dict mapping model names to an Answer object. 
The model names can be one of '{gpt4, bing_chat, rr_sphere_gpt4, rr_gs_gpt4, post_hoc_sphere_gpt4, post_hoc_gs_gpt4}'. \n* 'metadata' - A dictionary with the following fields:\n * 'question_type' - The question type(s) separated by \"|\".\n * 'field' - The field to which the annotator belonged.\n * 'specific_field' - More specific field name within the broader field.\n\nEach Answer object contains the following fields:\n* 'answer_string': The answer string.\n* 'attribution': List of evidences for the answer (not linked to specific claims). Note that these are only URLs, the evidence passages are stored in the Claim object -- see below.\n* 'claims': List of Claim objects for the answer.\n* 'revised_answer_string': Revised answer by annotator.\n* 'usefulness': Usefulness of original answer marked by annotator.\n* 'annotation_time': Time taken for annotating this answer.\n* 'annotator_id': Anonymized annotator ID of the person who validated this answer.\n\nEach Claim object contains the following fields:\n* 'claim_string': Original claim string.\n* 'evidence': List of evidences for the claim (URL+passage or URL).\n* 'support': Attribution marked by annotator.\n* 'reason_missing_support': Reason for missing support specified by annotator.\n* 'informativeness': Informativeness of claim for the question, marked by annotator.\n* 'worthiness': Worthiness of citing claim marked by annotator.\n* 'correctness': Factual correctness of claim marked by annotator.\n* 'reliability': Reliability of source evidence marked by annotator.\n* 'revised_claim': Revised claim by annotator.\n* 'revised_evidence': Revised evidence by annotator." ]
[ "TAGS\n#task_categories-question-answering #annotations_creators-expert-generated #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #arxiv-2309.07852 #region-us \n", "# Dataset Card for ExpertQA", "## Dataset Description\n\n- Repository: URL \n- Paper: URL \n- Point of Contact: chaitanyamalaviya@URL", "### Dataset Summary\n\nWe provide here the data accompanying the paper: ExpertQA: Expert-Curated Questions and Attributed Answers. The ExpertQA dataset contains 2177 examples from 32 different fields.", "### Supported Tasks\n\nThe 'main' data contains 2177 examples that can be used to evaluate new methods for estimating factuality and attribution, while the 'lfqa_domain' and 'lfqa_rand' data can be used to evaluate long-form question answering systems.", "## Dataset Creation", "### Curation Rationale\n\nExpertQA was created to evaluate factuality & attribution in language model responses to domain-specific questions, as well as evaluate long-form question answering in domain-specific settings.", "### Annotation Process\n\nQuestions in ExpertQA were formulated by experts spanning 32 fields. The answers to these questions are expert-verified, model-generated answers to these questions. Each claim-evidence pair in an answer is judged by experts for various properties such as the claim’s informativeness, factuality, citeworthiness, whether the claim is supported by the evidence, and reliability of the evidence source. Further, experts revise the original claims to ensure they are factual and supported by trustworthy sources.", "## Dataset Structure", "### Data Instances\n\nWe provide the main data, with judgements of factuality and attribution, under the 'default' subset. 
\nThe long-form QA data splits are provided at 'lfqa_domain' (domain split) and 'lfqa_rand' (random split).\nAdditional files are provided in our GitHub repo.", "### Data Fields\n\nThe main data file contains newline-separated json dictionaries with the following fields:\n* 'question' - Question written by an expert.\n* 'annotator_id' - Anonymized annotator ID of the author of the question.\n* 'answers' - Dict mapping model names to an Answer object. The model names can be one of '{gpt4, bing_chat, rr_sphere_gpt4, rr_gs_gpt4, post_hoc_sphere_gpt4, post_hoc_gs_gpt4}'. \n* 'metadata' - A dictionary with the following fields:\n * 'question_type' - The question type(s) separated by \"|\".\n * 'field' - The field to which the annotator belonged.\n * 'specific_field' - More specific field name within the broader field.\n\nEach Answer object contains the following fields:\n* 'answer_string': The answer string.\n* 'attribution': List of evidences for the answer (not linked to specific claims). Note that these are only URLs, the evidence passages are stored in the Claim object -- see below.\n* 'claims': List of Claim objects for the answer.\n* 'revised_answer_string': Revised answer by annotator.\n* 'usefulness': Usefulness of original answer marked by annotator.\n* 'annotation_time': Time taken for annotating this answer.\n* 'annotator_id': Anonymized annotator ID of the person who validated this answer.\n\nEach Claim object contains the following fields:\n* 'claim_string': Original claim string.\n* 'evidence': List of evidences for the claim (URL+passage or URL).\n* 'support': Attribution marked by annotator.\n* 'reason_missing_support': Reason for missing support specified by annotator.\n* 'informativeness': Informativeness of claim for the question, marked by annotator.\n* 'worthiness': Worthiness of citing claim marked by annotator.\n* 'correctness': Factual correctness of claim marked by annotator.\n* 'reliability': Reliability of source evidence marked by 
annotator.\n* 'revised_claim': Revised claim by annotator.\n* 'revised_evidence': Revised evidence by annotator." ]
[ 69, 7, 26, 51, 67, 5, 49, 120, 6, 80, 564 ]
[ "passage: TAGS\n#task_categories-question-answering #annotations_creators-expert-generated #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #arxiv-2309.07852 #region-us \n# Dataset Card for ExpertQA## Dataset Description\n\n- Repository: URL \n- Paper: URL \n- Point of Contact: chaitanyamalaviya@URL### Dataset Summary\n\nWe provide here the data accompanying the paper: ExpertQA: Expert-Curated Questions and Attributed Answers. The ExpertQA dataset contains 2177 examples from 32 different fields.### Supported Tasks\n\nThe 'main' data contains 2177 examples that can be used to evaluate new methods for estimating factuality and attribution, while the 'lfqa_domain' and 'lfqa_rand' data can be used to evaluate long-form question answering systems.## Dataset Creation### Curation Rationale\n\nExpertQA was created to evaluate factuality & attribution in language model responses to domain-specific questions, as well as evaluate long-form question answering in domain-specific settings.### Annotation Process\n\nQuestions in ExpertQA were formulated by experts spanning 32 fields. The answers to these questions are expert-verified, model-generated answers to these questions. Each claim-evidence pair in an answer is judged by experts for various properties such as the claim’s informativeness, factuality, citeworthiness, whether the claim is supported by the evidence, and reliability of the evidence source. Further, experts revise the original claims to ensure they are factual and supported by trustworthy sources.## Dataset Structure### Data Instances\n\nWe provide the main data, with judgements of factuality and attribution, under the 'default' subset. \nThe long-form QA data splits are provided at 'lfqa_domain' (domain split) and 'lfqa_rand' (random split).\nAdditional files are provided in our GitHub repo." ]
bc3f7c3f71c1d9302e102b12aaeb8b21db4581c1
# Dataset Card for "TTS_Speaker_01" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DataStudio/TTS_Speaker_01
[ "region:us" ]
2023-10-03T03:02:40+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1069341549.668, "num_examples": 8518}], "download_size": 776772238, "dataset_size": 1069341549.668}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T03:03:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "TTS_Speaker_01" More Information needed
[ "# Dataset Card for \"TTS_Speaker_01\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"TTS_Speaker_01\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"TTS_Speaker_01\"\n\nMore Information needed" ]
9cec67b27438260d4f75d56bffdaa1b180f55210
# Dataset Card for "LLaVA_Mega_JSON" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vilm/LLaVA_Mega_JSON
[ "region:us" ]
2023-10-03T03:23:47+00:00
{"dataset_info": {"features": [{"name": "global_image_id", "dtype": "string"}, {"name": "image_path", "dtype": "string"}, {"name": "dialog", "sequence": {"sequence": "string"}}, {"name": "anns_id", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "convo", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 812129242, "num_examples": 133571}], "download_size": 257146024, "dataset_size": 812129242}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-04T02:22:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LLaVA_Mega_JSON" More Information needed
[ "# Dataset Card for \"LLaVA_Mega_JSON\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LLaVA_Mega_JSON\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LLaVA_Mega_JSON\"\n\nMore Information needed" ]
a10f216f57d605eb0f21bc2964f3571e50f93954
# Bangumi Image Base of Demon Slayer This is the image base of bangumi Demon Slayer, we detected 78 characters, 5890 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 256 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 42 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 305 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 10 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | 
![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 31 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 23 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 50 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 1991 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 82 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 192 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 72 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 
2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 87 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 43 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 61 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 53 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 34 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 58 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 
7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 32 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 56 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 48 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 32 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 37 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 48 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 186 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | 
![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 47 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 23 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 94 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 37 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 28 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 24 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | 
![preview 8](29/preview_8.png) | | 30 | 46 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 35 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 105 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 22 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 17 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 37 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 17 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 
3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 12 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 25 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 14 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 18 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 92 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 77 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 
8](42/preview_8.png) | | 43 | 16 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 44 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 30 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 16 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 73 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 149 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 17 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | 
![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 34 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 13 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 31 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 8 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 165 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 53 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 
| 19 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 24 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 20 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 15 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 18 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 18 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 19 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | 
![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 33 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 13 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 16 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 5 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | N/A | N/A | N/A | | 67 | 22 | [Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 15 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) | | 69 | 24 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 
3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 6 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | N/A | N/A | | 71 | 12 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | ![preview 7](71/preview_7.png) | ![preview 8](71/preview_8.png) | | 72 | 10 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | ![preview 7](72/preview_7.png) | ![preview 8](72/preview_8.png) | | 73 | 10 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) | | 74 | 27 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | ![preview 7](74/preview_7.png) | ![preview 8](74/preview_8.png) | | 75 | 6 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | N/A | N/A | | 76 | 103 | [Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 
2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | ![preview 7](76/preview_7.png) | ![preview 8](76/preview_8.png) | | noise | 207 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/demonslayer
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-03T03:48:08+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-03T07:11:22+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Demon Slayer ================================== This is the image base of bangumi Demon Slayer, we detected 78 characters, 5890 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
e0715f8564c678326dad400b454c6cba209ea57b
# Dataset Card for "pianofor-ai-masked-v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
roszcz/pianofor-ai-masked-v3
[ "region:us" ]
2023-10-03T04:13:08+00:00
{"dataset_info": {"features": [{"name": "pitch", "sequence": "int8", "length": 90}, {"name": "start", "sequence": "float64", "length": 90}, {"name": "dstart", "sequence": "float64", "length": 90}, {"name": "end", "sequence": "float64", "length": 90}, {"name": "duration", "sequence": "float64", "length": 90}, {"name": "velocity", "sequence": "int8", "length": 90}, {"name": "source", "dtype": "string"}, {"name": "masking_space", "struct": [{"name": "<Random Mask>", "sequence": "bool", "length": 90}, {"name": "<LH Mask>", "sequence": "bool", "length": 90}, {"name": "<RH Mask>", "sequence": "bool", "length": 90}, {"name": "<Harmonic Root Mask>", "sequence": "bool", "length": 90}, {"name": "<Harmonic Outliers Mask>", "sequence": "bool", "length": 90}]}], "splits": [{"name": "train", "num_bytes": 18556593981, "num_examples": 5475939}], "download_size": 18858529237, "dataset_size": 18556593981}}
2023-10-03T05:40:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pianofor-ai-masked-v3" More Information needed
[ "# Dataset Card for \"pianofor-ai-masked-v3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pianofor-ai-masked-v3\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"pianofor-ai-masked-v3\"\n\nMore Information needed" ]
c3774389c9ea0ffd57afb1eec94a4ade1a134bd5
# Dataset Card for "azaria-mitchell-diff-filtered-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
notrichardren/azaria-mitchell-diff-filtered-2
[ "region:us" ]
2023-10-03T04:22:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "cities", "path": "data/cities-*"}, {"split": "companies", "path": "data/companies-*"}, {"split": "animals", "path": "data/animals-*"}, {"split": "elements", "path": "data/elements-*"}, {"split": "inventions", "path": "data/inventions-*"}, {"split": "facts", "path": "data/facts-*"}]}], "dataset_info": {"features": [{"name": "claim", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "dataset", "dtype": "string"}, {"name": "qa_type", "dtype": "int64"}, {"name": "ind", "dtype": "int64"}], "splits": [{"name": "cities", "num_bytes": 311504, "num_examples": 4416}, {"name": "companies", "num_bytes": 86125, "num_examples": 777}, {"name": "animals", "num_bytes": 60222, "num_examples": 692}, {"name": "elements", "num_bytes": 52499, "num_examples": 636}, {"name": "inventions", "num_bytes": 49480, "num_examples": 594}, {"name": "facts", "num_bytes": 43529, "num_examples": 472}], "download_size": 209164, "dataset_size": 603359}}
2023-10-03T04:22:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "azaria-mitchell-diff-filtered-2" More Information needed
[ "# Dataset Card for \"azaria-mitchell-diff-filtered-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"azaria-mitchell-diff-filtered-2\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"azaria-mitchell-diff-filtered-2\"\n\nMore Information needed" ]
42d3974bb8a11f32a89f9a3030998c18eea6bc1f
# Dataset Card for "sample_dataset1_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tanvirsrbd1/sample_dataset1_1
[ "region:us" ]
2023-10-03T04:23:24+00:00
{"dataset_info": {"features": [{"name": "html", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1837883, "num_examples": 2980}], "download_size": 607662, "dataset_size": 1837883}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T04:23:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sample_dataset1_1" More Information needed
[ "# Dataset Card for \"sample_dataset1_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sample_dataset1_1\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sample_dataset1_1\"\n\nMore Information needed" ]
66e93a87f548e4f67888ac9708aa7755d8d5590c
# Dataset Card for "ecommerce_purchase_history_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jangmin/ecommerce_purchase_history_v2
[ "region:us" ]
2023-10-03T04:30:41+00:00
{"dataset_info": {"features": [{"name": "user_id", "dtype": "int64"}, {"name": "day", "dtype": "string"}, {"name": "order_ts", "dtype": "string"}, {"name": "positive_prod_id", "dtype": "int64"}, {"name": "negative_prod_id", "dtype": "int64"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "effective_order_infos", "list": {"list": [{"name": "contents", "list": [{"name": "category_id", "dtype": "int64"}, {"name": "product_id", "dtype": "int64"}, {"name": "text", "dtype": "string"}]}, {"name": "order_id", "dtype": "string"}, {"name": "order_ts", "dtype": "timestamp[us]"}]}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 193522291, "num_examples": 86264}, {"name": "test", "num_bytes": 74028559, "num_examples": 21566}, {"name": "conservative_test", "num_bytes": 40121578, "num_examples": 8236}], "download_size": 44200184, "dataset_size": 307672428}}
2023-10-03T04:31:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ecommerce_purchase_history_v2" More Information needed
[ "# Dataset Card for \"ecommerce_purchase_history_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ecommerce_purchase_history_v2\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ecommerce_purchase_history_v2\"\n\nMore Information needed" ]
5793f9027f1d23042b31a25ebc8ec11b25ab5d24
# Dataset Card for "sst2_affix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/sst2_affix
[ "region:us" ]
2023-10-03T05:09:27+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int32"}, {"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "positive"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "validation", "num_bytes": 22640, "num_examples": 146}], "download_size": 19044, "dataset_size": 22640}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-10-03T05:09:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sst2_affix" More Information needed
[ "# Dataset Card for \"sst2_affix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sst2_affix\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sst2_affix\"\n\nMore Information needed" ]
9aa01e83cb8164df97cd9c436253f5cc52e0229a
# Dataset Card for "imdb_affix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/imdb_affix
[ "region:us" ]
2023-10-03T05:18:24+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "words_with_affixes", "dtype": "null"}], "splits": [{"name": "test"}], "download_size": 1015, "dataset_size": 0}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-11-17T01:12:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "imdb_affix" More Information needed
[ "# Dataset Card for \"imdb_affix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"imdb_affix\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"imdb_affix\"\n\nMore Information needed" ]
e0e956623cc3e98c51c91088187bf8527ca83ada
# Dataset Card for "llama2_Chat_trainingsetv2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SniiKz/llama2_Chat_trainingsetv2
[ "region:us" ]
2023-10-03T05:37:05+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 837513, "num_examples": 2645}], "download_size": 196452, "dataset_size": 837513}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T05:37:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "llama2_Chat_trainingsetv2" More Information needed
[ "# Dataset Card for \"llama2_Chat_trainingsetv2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"llama2_Chat_trainingsetv2\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"llama2_Chat_trainingsetv2\"\n\nMore Information needed" ]
e04ec05c9b82a3c9ac35342bb1ea8f8778902247
# Dataset Card for "rotten_tomatoes_affix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/rotten_tomatoes_affix
[ "region:us" ]
2023-10-03T05:38:41+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 32292, "num_examples": 194}], "download_size": 24662, "dataset_size": 32292}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-03T05:38:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rotten_tomatoes_affix" More Information needed
[ "# Dataset Card for \"rotten_tomatoes_affix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rotten_tomatoes_affix\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rotten_tomatoes_affix\"\n\nMore Information needed" ]
7082413562586d26a98f2a05aa558ad7b780495a
# Dataset Card for "tweet_eval_affix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joey234/tweet_eval_affix
[ "region:us" ]
2023-10-03T05:42:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "neutral", "2": "positive"}}}}, {"name": "words_with_affixes", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 137916, "num_examples": 1060}], "download_size": 95675, "dataset_size": 137916}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-03T05:42:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tweet_eval_affix" More Information needed
[ "# Dataset Card for \"tweet_eval_affix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tweet_eval_affix\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tweet_eval_affix\"\n\nMore Information needed" ]
dd6e43f4b089378aa82439da98150c400675e863
### Dataset info #### Training Dataset: You are provided with a large number of Wikipedia comments which have been labeled by human raters for toxic behavior. The types of toxicity are: - toxic - severe_toxic - obscene - threat - insult - identity_hate The original dataset can be found here: [jigsaw_toxic_classification](https://www.kaggle.com/competitions/jigsaw-toxic-comment-classification-challenge/data) Our training dataset is a sampled version from the original dataset, <b>containing equal number of samples for both clean and toxic classes. </b><br> #### Dataset creation: <code><pre>data = pd.read_csv('train.csv') # train.csv from the original dataset column_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] column_labels = data[column_names][2:-1] train_toxic = data[data[column_names].sum(axis=1) > 0] train_clean = data[data[column_names].sum(axis=1) == 0] train_clean_sampled = train_clean.sample(n=16225, random_state=42) dataframe = pd.concat([train_toxic, train_clean_sampled], axis=0) dataframe = dataframe.sample(frac=1, random_state=42) dataset = Dataset.from_pandas(dataframe) train_dataset = dataset.train_test_split(test_size=0.2)['train'] val_dataset = dataset.train_test_split(test_size=0.2)['test']</pre></code> ### Caution: This dataset contains comments that are toxic in nature. Kindly use appropriately. ### Citation <pre> @misc{jigsaw-toxic-comment-classification-challenge, author = {cjadams, Jeffrey Sorensen, Julia Elliott, Lucas Dixon, Mark McDonald, nithum, Will Cukierski}, title = {Toxic Comment Classification Challenge}, publisher = {Kaggle}, year = {2017}, url = {https://kaggle.com/competitions/jigsaw-toxic-comment-classification-challenge} }</pre>
Arsive/toxicity_classification_jigsaw
[ "task_categories:text-classification", "size_categories:1K<n<200K", "language:en", "license:apache-2.0", "region:us" ]
2023-10-03T05:51:48+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<200K"], "task_categories": ["text-classification"]}
2023-10-03T11:51:28+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-1K<n<200K #language-English #license-apache-2.0 #region-us
### Dataset info #### Training Dataset: You are provided with a large number of Wikipedia comments which have been labeled by human raters for toxic behavior. The types of toxicity are: - toxic - severe_toxic - obscene - threat - insult - identity_hate The original dataset can be found here: jigsaw_toxic_classification Our training dataset is a sampled version from the original dataset, <b>containing equal number of samples for both clean and toxic classes. </b><br> #### Dataset creation: <code><pre>data = pd.read_csv('URL') # URL from the original dataset column_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] column_labels = data[column_names][2:-1] train_toxic = data[data[column_names].sum(axis=1) > 0] train_clean = data[data[column_names].sum(axis=1) == 0] train_clean_sampled = train_clean.sample(n=16225, random_state=42) dataframe = URL([train_toxic, train_clean_sampled], axis=0) dataframe = URL(frac=1, random_state=42) dataset = Dataset.from_pandas(dataframe) train_dataset = dataset.train_test_split(test_size=0.2)['train'] val_dataset = dataset.train_test_split(test_size=0.2)['test']</pre></code> ### Caution: This dataset contains comments that are toxic in nature. Kindly use appropriately. <pre> @misc{jigsaw-toxic-comment-classification-challenge, author = {cjadams, Jeffrey Sorensen, Julia Elliott, Lucas Dixon, Mark McDonald, nithum, Will Cukierski}, title = {Toxic Comment Classification Challenge}, publisher = {Kaggle}, year = {2017}, url = {URL }</pre>
[ "### Dataset info", "#### Training Dataset: \nYou are provided with a large number of Wikipedia comments which have been labeled by human raters for toxic behavior. The types of toxicity are:\n\n- toxic\n- severe_toxic\n- obscene\n- threat\n- insult\n- identity_hate\n\nThe original dataset can be found here: jigsaw_toxic_classification\n\nOur training dataset is a sampled version from the original dataset, <b>containing equal number of samples for both clean and toxic classes. </b><br>", "#### Dataset creation:\n<code><pre>data = pd.read_csv('URL') # URL from the original dataset\ncolumn_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\ncolumn_labels = data[column_names][2:-1]\ntrain_toxic = data[data[column_names].sum(axis=1) > 0]\ntrain_clean = data[data[column_names].sum(axis=1) == 0]\ntrain_clean_sampled = train_clean.sample(n=16225, random_state=42)\n\ndataframe = URL([train_toxic, train_clean_sampled], axis=0)\n\ndataframe = URL(frac=1, random_state=42)\ndataset = Dataset.from_pandas(dataframe)\n\ntrain_dataset = dataset.train_test_split(test_size=0.2)['train']\nval_dataset = dataset.train_test_split(test_size=0.2)['test']</pre></code>", "### Caution:\nThis dataset contains comments that are toxic in nature. Kindly use appropriately.\n\n<pre>\n @misc{jigsaw-toxic-comment-classification-challenge,\n author = {cjadams, Jeffrey Sorensen, Julia Elliott, Lucas Dixon, Mark McDonald, nithum, Will Cukierski},\n title = {Toxic Comment Classification Challenge},\n publisher = {Kaggle},\n year = {2017},\n url = {URL\n}</pre>" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<200K #language-English #license-apache-2.0 #region-us \n", "### Dataset info", "#### Training Dataset: \nYou are provided with a large number of Wikipedia comments which have been labeled by human raters for toxic behavior. The types of toxicity are:\n\n- toxic\n- severe_toxic\n- obscene\n- threat\n- insult\n- identity_hate\n\nThe original dataset can be found here: jigsaw_toxic_classification\n\nOur training dataset is a sampled version from the original dataset, <b>containing equal number of samples for both clean and toxic classes. </b><br>", "#### Dataset creation:\n<code><pre>data = pd.read_csv('URL') # URL from the original dataset\ncolumn_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\ncolumn_labels = data[column_names][2:-1]\ntrain_toxic = data[data[column_names].sum(axis=1) > 0]\ntrain_clean = data[data[column_names].sum(axis=1) == 0]\ntrain_clean_sampled = train_clean.sample(n=16225, random_state=42)\n\ndataframe = URL([train_toxic, train_clean_sampled], axis=0)\n\ndataframe = URL(frac=1, random_state=42)\ndataset = Dataset.from_pandas(dataframe)\n\ntrain_dataset = dataset.train_test_split(test_size=0.2)['train']\nval_dataset = dataset.train_test_split(test_size=0.2)['test']</pre></code>", "### Caution:\nThis dataset contains comments that are toxic in nature. Kindly use appropriately.\n\n<pre>\n @misc{jigsaw-toxic-comment-classification-challenge,\n author = {cjadams, Jeffrey Sorensen, Julia Elliott, Lucas Dixon, Mark McDonald, nithum, Will Cukierski},\n title = {Toxic Comment Classification Challenge},\n publisher = {Kaggle},\n year = {2017},\n url = {URL\n}</pre>" ]
[ 41, 5, 114, 287, 114 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<200K #language-English #license-apache-2.0 #region-us \n### Dataset info#### Training Dataset: \nYou are provided with a large number of Wikipedia comments which have been labeled by human raters for toxic behavior. The types of toxicity are:\n\n- toxic\n- severe_toxic\n- obscene\n- threat\n- insult\n- identity_hate\n\nThe original dataset can be found here: jigsaw_toxic_classification\n\nOur training dataset is a sampled version from the original dataset, <b>containing equal number of samples for both clean and toxic classes. </b><br>#### Dataset creation:\n<code><pre>data = pd.read_csv('URL') # URL from the original dataset\ncolumn_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\ncolumn_labels = data[column_names][2:-1]\ntrain_toxic = data[data[column_names].sum(axis=1) > 0]\ntrain_clean = data[data[column_names].sum(axis=1) == 0]\ntrain_clean_sampled = train_clean.sample(n=16225, random_state=42)\n\ndataframe = URL([train_toxic, train_clean_sampled], axis=0)\n\ndataframe = URL(frac=1, random_state=42)\ndataset = Dataset.from_pandas(dataframe)\n\ntrain_dataset = dataset.train_test_split(test_size=0.2)['train']\nval_dataset = dataset.train_test_split(test_size=0.2)['test']</pre></code>" ]
24f60888b9ba7642648586a21b19a27408a61878
Safesearch v5.0, which uses an innovative EfficientNetV2.5 architecture, will be released soon, along with the benchmark CSV file containing all image URLs, Google Safesearch predictions, AIstrova Safesearch predictions, and true labels. This benchmark (validation set) has been reviewed multiple times, using only commonly accepted definitions of safe and unsafe content to minimize bias. However, it may still contain image labels that are controversial due to the subjective nature of this task. I have done my best to reduce the chance of incorrect Google Safesearch labels using [this Google Image search method](./google_image.js). The logic goes like this: 1. Enter a search term 2. Search for the top 400 strictly safe images 3. Search for the top 200 images with safesearch turned off 4. Filter the safe images obtained in #2 from #3 to get strictly NSFW images The reason I double the safe images is to minimize false positives. Google Image search results may change slightly over time. ## Nov 5th, 2023 Update Updated transforms function, by adding `RandomResizedCrop`, `ColorJitter`, and probabilities `p` (array) for `RandomChoice` selections. Finally, after 70 days of research, testing, and experiments, I invented a new architecture and training algorithm that can significantly outperform Google Safesearch's performance on this benchmark **after training for 2 out of 8 epochs**. This was the benchmark result at 299x299 resolution, after one epoch of training: - C (84.337%) - M (80.357%) - A (78.700%) - D (87.500%) After two epochs of training: - C (85.542%) - M (87.500%) - A (91.697%) - D (80.435%) Thoughts: - Models were usually trained for 8 epochs in Sept and Oct. Such a high pattern recognition capability in the first two epochs is unprecedented among all experiments and testing done from Aug to Oct, 2023. - The training accuracies of this model are below average among all the image classification models (that my 24 GB VRAM can handle) to date. 
This is a good sign, because it's clear that this model is much less likely to overfit. I will release this model once I confirm that this new one can break the Sept 30th record. ## Oct 30th, 2023 Update So far, no new model architecture could break the Sept 30th record. It's very difficult to evaluate the Bing, Google, and AIstrova Safesearch algorithms together, but I can give rough estimates in the form of a range for Bing's performance on this benchmark. ## Sept 30th, 2023 Update *I'm creating and testing new architectures to find an even more suitable model for this task. The table below only shows the best result so far as of Sept 30th, 2023.* | Model Name | Benchmark Subset | Accuracy | Test Samples Directly from Google Images | Challenge | |---------------------|----------------------------------------------|--------------|------------------------------------------|--------------------------------------------------| | AIstrova Safesearch v5.0 | Clothing (hentai vs safe waifu) | **88.755%** | 249 | Ability to classify hentai vs. 
safe waifu content, even if it's on an unusual format like shirt prints | | Google Safesearch |----------------------------------------------| 55.422% |------------------------------------------|--------------------------------------------------| | AIstrova Safesearch v5.0 | Movie Scenes & Video Games (graphic vs safe content) | **90.179%** | 224 | Ability to understand the nuanced differences between small injuries, horror, gory, and graphic content | | Google Safesearch |----------------------------------------------| 69.196% |------------------------------------------|--------------------------------------------------| | AIstrova Safesearch v5.0 | African Women (suggestive vs sexy) | **93.141%** | 277 | Ability to understand nuanced differences between sexy and sexually suggestive photos and make unbiased predictions, by training on a dataset with almost no African people | | Google Safesearch |----------------------------------------------| 77.617% |------------------------------------------|--------------------------------------------------| | AIstrova Safesearch v5.0 | Drawings (nudity vs safe) | **90.217%** | 184 | Ability to generalize on artworks with less than 100 artworks in the training data | | Google Safesearch |----------------------------------------------| 79.891% |------------------------------------------|--------------------------------------------------|
aistrova/cmad
[ "license:cc-by-nc-sa-4.0", "region:us" ]
2023-10-03T06:15:04+00:00
{"license": "cc-by-nc-sa-4.0"}
2023-11-05T21:48:21+00:00
[]
[]
TAGS #license-cc-by-nc-sa-4.0 #region-us
Safesearch v5.0, which uses an innovative EfficientNetV2.5 architecture, will be released soon, along with the benchmark CSV file containing all image URLs, Google Safesearch predictions, AIstrova Safesearch predictions, and true labels. This benchmark (validation set) has been reviewed multiple times, using only commonly accepted definitions of safe and unsafe content to minimize bias. However, it may still contain image labels that are controversial due to the subjective nature of this task. I have done my best to reduce the chance of incorrect Google Safesearch labels using this Google Image search method. The logic goes like this: 1. Enter a search term 2. Search for the top 400 strictly safe images 3. Search for the top 200 images with safesearch turned off 4. Filter the safe images obtained in #2 from #3 to get strictly NSFW images The reason I double the safe images is to minimize false positives. Google Image search results may change slightly over time. Nov 5th, 2023 Update -------------------- Updated transforms function, by adding 'RandomResizedCrop', 'ColorJitter', and probabilities 'p' (array) for 'RandomChoice' selections. Finally, after 70 days of research, testing, and experiments, I invented a new architecture and training algorithm that can significantly outperform Google Safesearch's performance on this benchmark after training for 2 out of 8 epochs. This was the benchmark result at 299x299 resolution, after one epoch of training: * C (84.337%) * M (80.357%) * A (78.700%) * D (87.500%) After two epochs of training: * C (85.542%) * M (87.500%) * A (91.697%) * D (80.435%) Thoughts: * Models were usually trained for 8 epochs in Sept and Oct. Such a high pattern recognition capability in the first two epochs is unprecedented among all experiments and testing done from Aug to Oct, 2023. * The training accuracies of this model are below average among all the image classification models (that my 24 GB VRAM can handle) to date. 
This is a good sign, because it's clear that this model is much less likely to overfit. I will release this model once I confirm that this new one can break the Sept 30th record. Oct 30th, 2023 Update --------------------- So far, no new model architecture could break the Sept 30th record. It's very difficult to evaluate the Bing, Google, and AIstrova Safesearch algorithms together, but I can give rough estimates in the form of a range for Bing's performance on this benchmark. Sept 30th, 2023 Update ---------------------- *I'm creating and testing new architectures to find an even more suitable model for this task. The table below only shows the best result so far as of Sept 30th, 2023.*
[]
[ "TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n" ]
[ 19 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-4.0 #region-us \n" ]
7771f62f55074f215f02f609adbdf90c802a4fed
# NCTCRCHE100K Dataset Card # Citation ```bash Kather, Jakob Nikolas, Halama, Niels, & Marx, Alexander. (2018). 100,000 histological images of human colorectal cancer and healthy tissue (v0.1) [Data set]. Zenodo. https://doi.org/10.5281/zenodo.1214456 ``` # Description This is a set of 100,000 non-overlapping image patches from hematoxylin & eosin (H&E) stained histological images of human colorectal cancer (CRC) and normal tissue. All images are 224x224 pixels (px) at 0.5 microns per pixel (MPP). All images are color-normalized using Macenko's method (http://ieeexplore.ieee.org/abstract/document/5193250/, DOI 10.1109/ISBI.2009.5193250). Tissue classes are: Adipose (ADI), background (BACK), debris (DEB), lymphocytes (LYM), mucus (MUC), smooth muscle (MUS), normal colon mucosa (NORM), cancer-associated stroma (STR), colorectal adenocarcinoma epithelium (TUM). These images were manually extracted from N=86 H&E stained human cancer tissue slides from formalin-fixed paraffin-embedded (FFPE) samples from the NCT Biobank (National Center for Tumor Diseases, Heidelberg, Germany) and the UMM pathology archive (University Medical Center Mannheim, Mannheim, Germany). Tissue samples contained CRC primary tumor slides and tumor tissue from CRC liver metastases; normal tissue classes were augmented with non-tumorous regions from gastrectomy specimen to increase variability. ### Data Structure The dataset is structured into training splits (100,000 "train" and 100,000 "train_nonorm" samples) as well as a validation split of 7180 samples. ## Setup Instructions ```bash from torch.utils.data import DataLoader from torchvision.transforms import ToTensor def transform(data): data["image"] = [ToTensor()(img) for img in data["image"]] # convert to torch.Tensor return data from datasets import load_dataset ds_train = load_dataset("DykeF/NCTCRCHE100K", split="train") # or train_nonorm or validation ds_train.set_transform(transform)
DykeF/NCTCRCHE100K
[ "license:cc-by-4.0", "region:us" ]
2023-10-03T06:42:24+00:00
{"license": "cc-by-4.0"}
2023-10-04T18:37:15+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# NCTCRCHE100K Dataset Card # Description This is a set of 100,000 non-overlapping image patches from hematoxylin & eosin (H&E) stained histological images of human colorectal cancer (CRC) and normal tissue. All images are 224x224 pixels (px) at 0.5 microns per pixel (MPP). All images are color-normalized using Macenko's method (URL DOI 10.1109/ISBI.2009.5193250). Tissue classes are: Adipose (ADI), background (BACK), debris (DEB), lymphocytes (LYM), mucus (MUC), smooth muscle (MUS), normal colon mucosa (NORM), cancer-associated stroma (STR), colorectal adenocarcinoma epithelium (TUM). These images were manually extracted from N=86 H&E stained human cancer tissue slides from formalin-fixed paraffin-embedded (FFPE) samples from the NCT Biobank (National Center for Tumor Diseases, Heidelberg, Germany) and the UMM pathology archive (University Medical Center Mannheim, Mannheim, Germany). Tissue samples contained CRC primary tumor slides and tumor tissue from CRC liver metastases; normal tissue classes were augmented with non-tumorous regions from gastrectomy specimen to increase variability. ### Data Structure The dataset is structured into training splits (100,000 "train" and 100,000 "train_nonorm" samples) as well as a validation split of 7180 samples. ## Setup Instructions '''bash from URL import DataLoader from torchvision.transforms import ToTensor def transform(data): data["image"] = [ToTensor()(img) for img in data["image"]] # convert to torch.Tensor return data from datasets import load_dataset ds_train = load_dataset("DykeF/NCTCRCHE100K", split="train") # or train_nonorm or validation ds_train.set_transform(transform)
[ "# NCTCRCHE100K Dataset Card", "# Description\nThis is a set of 100,000 non-overlapping image patches from hematoxylin & eosin (H&E) stained histological images of human colorectal cancer (CRC) and normal tissue.\nAll images are 224x224 pixels (px) at 0.5 microns per pixel (MPP). All images are color-normalized using Macenko's method (URL DOI 10.1109/ISBI.2009.5193250).\nTissue classes are: Adipose (ADI), background (BACK), debris (DEB), lymphocytes (LYM), mucus (MUC), smooth muscle (MUS), normal colon mucosa (NORM), cancer-associated stroma (STR), colorectal adenocarcinoma epithelium (TUM).\nThese images were manually extracted from N=86 H&E stained human cancer tissue slides from formalin-fixed paraffin-embedded (FFPE) samples from the NCT Biobank (National Center for Tumor Diseases, Heidelberg, Germany) and the UMM pathology archive (University Medical Center Mannheim, Mannheim, Germany). Tissue samples contained CRC primary tumor slides and tumor tissue from CRC liver metastases; normal tissue classes were augmented with non-tumorous regions from gastrectomy specimen to increase variability.", "### Data Structure\nThe dataset is structured into training splits (100,000 \"train\" and 100,000 \"train_nonorm\" samples) as well as a validation split of 7180 samples.", "## Setup Instructions\n\n'''bash\nfrom URL import DataLoader\nfrom torchvision.transforms import ToTensor\n\ndef transform(data): \n data[\"image\"] = [ToTensor()(img) for img in data[\"image\"]] # convert to torch.Tensor\n return data\n\nfrom datasets import load_dataset\nds_train = load_dataset(\"DykeF/NCTCRCHE100K\", split=\"train\") # or train_nonorm or validation\nds_train.set_transform(transform)" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# NCTCRCHE100K Dataset Card", "# Description\nThis is a set of 100,000 non-overlapping image patches from hematoxylin & eosin (H&E) stained histological images of human colorectal cancer (CRC) and normal tissue.\nAll images are 224x224 pixels (px) at 0.5 microns per pixel (MPP). All images are color-normalized using Macenko's method (URL DOI 10.1109/ISBI.2009.5193250).\nTissue classes are: Adipose (ADI), background (BACK), debris (DEB), lymphocytes (LYM), mucus (MUC), smooth muscle (MUS), normal colon mucosa (NORM), cancer-associated stroma (STR), colorectal adenocarcinoma epithelium (TUM).\nThese images were manually extracted from N=86 H&E stained human cancer tissue slides from formalin-fixed paraffin-embedded (FFPE) samples from the NCT Biobank (National Center for Tumor Diseases, Heidelberg, Germany) and the UMM pathology archive (University Medical Center Mannheim, Mannheim, Germany). Tissue samples contained CRC primary tumor slides and tumor tissue from CRC liver metastases; normal tissue classes were augmented with non-tumorous regions from gastrectomy specimen to increase variability.", "### Data Structure\nThe dataset is structured into training splits (100,000 \"train\" and 100,000 \"train_nonorm\" samples) as well as a validation split of 7180 samples.", "## Setup Instructions\n\n'''bash\nfrom URL import DataLoader\nfrom torchvision.transforms import ToTensor\n\ndef transform(data): \n data[\"image\"] = [ToTensor()(img) for img in data[\"image\"]] # convert to torch.Tensor\n return data\n\nfrom datasets import load_dataset\nds_train = load_dataset(\"DykeF/NCTCRCHE100K\", split=\"train\") # or train_nonorm or validation\nds_train.set_transform(transform)" ]
[ 15, 10, 306, 50, 127 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# NCTCRCHE100K Dataset Card# Description\nThis is a set of 100,000 non-overlapping image patches from hematoxylin & eosin (H&E) stained histological images of human colorectal cancer (CRC) and normal tissue.\nAll images are 224x224 pixels (px) at 0.5 microns per pixel (MPP). All images are color-normalized using Macenko's method (URL DOI 10.1109/ISBI.2009.5193250).\nTissue classes are: Adipose (ADI), background (BACK), debris (DEB), lymphocytes (LYM), mucus (MUC), smooth muscle (MUS), normal colon mucosa (NORM), cancer-associated stroma (STR), colorectal adenocarcinoma epithelium (TUM).\nThese images were manually extracted from N=86 H&E stained human cancer tissue slides from formalin-fixed paraffin-embedded (FFPE) samples from the NCT Biobank (National Center for Tumor Diseases, Heidelberg, Germany) and the UMM pathology archive (University Medical Center Mannheim, Mannheim, Germany). Tissue samples contained CRC primary tumor slides and tumor tissue from CRC liver metastases; normal tissue classes were augmented with non-tumorous regions from gastrectomy specimen to increase variability.### Data Structure\nThe dataset is structured into training splits (100,000 \"train\" and 100,000 \"train_nonorm\" samples) as well as a validation split of 7180 samples." ]
5a9a497aa0fd2df1f6817af6c6b9904c2b37318d
# Dataset Card for "viettel_v3.1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nguyenthanhdo/viettel_v3.1
[ "region:us" ]
2023-10-03T06:52:24+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "translated", "dtype": "bool"}, {"name": "output_len", "dtype": "int64"}, {"name": "source", "dtype": "string"}, {"name": "input", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 314243226.0, "num_examples": 90000}], "download_size": 151381354, "dataset_size": 314243226.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T06:52:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "viettel_v3.1" More Information needed
[ "# Dataset Card for \"viettel_v3.1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"viettel_v3.1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"viettel_v3.1\"\n\nMore Information needed" ]
71f5164c454d6bde86ee0b77b5c0c1e16ac209b2
# Dataset Card for "Soldering-Data-pix2pix-1001" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ouvic215/Soldering-Data-pix2pix-1001
[ "region:us" ]
2023-10-03T06:56:45+00:00
{"dataset_info": {"features": [{"name": "mask_image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 961523307.5, "num_examples": 12054}], "download_size": 960371764, "dataset_size": 961523307.5}}
2023-10-03T07:01:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Soldering-Data-pix2pix-1001" More Information needed
[ "# Dataset Card for \"Soldering-Data-pix2pix-1001\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Soldering-Data-pix2pix-1001\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Soldering-Data-pix2pix-1001\"\n\nMore Information needed" ]
896e3acd3cb1c218928f12666977e6d0cb09c04e
# Dataset Card for "instructpix2pix-clip-filtered200-samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
FelixdoingAI/IP2P-200
[ "region:us" ]
2023-10-03T07:07:02+00:00
{"dataset_info": {"features": [{"name": "original_prompt", "dtype": "string"}, {"name": "original_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "edited_prompt", "dtype": "string"}, {"name": "edited_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 17732714.0, "num_examples": 200}], "download_size": 17730243, "dataset_size": 17732714.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-03T07:07:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "instructpix2pix-clip-filtered200-samples" More Information needed
[ "# Dataset Card for \"instructpix2pix-clip-filtered200-samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"instructpix2pix-clip-filtered200-samples\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"instructpix2pix-clip-filtered200-samples\"\n\nMore Information needed" ]
bc3aace0dde41e422f204587077a1c0a31cc0b3d
# Dataset Card for "eval_tag_squad_v7" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/eval_tag_squad_v7
[ "region:us" ]
2023-10-03T07:07:27+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12876477, "num_examples": 10570}, {"name": "validation", "num_bytes": 12876477, "num_examples": 10570}], "download_size": 5563526, "dataset_size": 25752954}}
2023-10-05T16:04:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eval_tag_squad_v7" More Information needed
[ "# Dataset Card for \"eval_tag_squad_v7\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eval_tag_squad_v7\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eval_tag_squad_v7\"\n\nMore Information needed" ]
c9b9a247abaef79c518d58e843f7190432a69999
# Dataset Card for "eval_tag_squad_v8" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/eval_tag_squad_v8
[ "region:us" ]
2023-10-03T07:07:36+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13020105, "num_examples": 10570}, {"name": "validation", "num_bytes": 13020105, "num_examples": 10570}], "download_size": 5664930, "dataset_size": 26040210}}
2023-10-05T15:55:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eval_tag_squad_v8" More Information needed
[ "# Dataset Card for \"eval_tag_squad_v8\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eval_tag_squad_v8\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eval_tag_squad_v8\"\n\nMore Information needed" ]