sha: string (length 40)
text: string (length 0 to 13.4M)
id: string (length 2 to 117)
tags: list
created_at: string (length 25)
metadata: string (length 2 to 31.7M)
last_modified: string (length 25)
3d724a7d3fadd3f887697c70000f3a438b166c51
# Dataset Card for "preprocessed_issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
open-source-metrics/preprocessed_issues
[ "region:us" ]
2023-03-24T22:49:09+00:00
{"dataset_info": {"features": [{"name": "diffusers", "dtype": "int64"}, {"name": "accelerate", "dtype": "int64"}, {"name": "chat_ui", "dtype": "int64"}, {"name": "optimum", "dtype": "int64"}, {"name": "pytorch_image_models", "dtype": "int64"}, {"name": "tokenizers", "dtype": "int64"}, {"name": "evaluate", "dtype": "int64"}, {"name": "candle", "dtype": "int64"}, {"name": "text_generation_inference", "dtype": "int64"}, {"name": "safetensors", "dtype": "int64"}, {"name": "gradio", "dtype": "int64"}, {"name": "transformers", "dtype": "int64"}, {"name": "datasets", "dtype": "int64"}, {"name": "hub_docs", "dtype": "int64"}, {"name": "peft", "dtype": "int64"}, {"name": "huggingface_hub", "dtype": "int64"}, {"name": "pytorch", "dtype": "int64"}, {"name": "langchain", "dtype": "int64"}, {"name": "openai_python", "dtype": "int64"}, {"name": "stable_diffusion_webui", "dtype": "int64"}, {"name": "tensorflow", "dtype": "int64"}, {"name": "day", "dtype": "string"}], "splits": [{"name": "raw", "num_bytes": 19652, "num_examples": 101}, {"name": "wow", "num_bytes": 20036, "num_examples": 103}, {"name": "eom", "num_bytes": 19652, "num_examples": 101}, {"name": "eom_wow", "num_bytes": 20036, "num_examples": 103}], "download_size": 77314, "dataset_size": 79376}, "configs": [{"config_name": "default", "data_files": [{"split": "raw", "path": "data/raw-*"}, {"split": "wow", "path": "data/wow-*"}, {"split": "eom", "path": "data/eom-*"}, {"split": "eom_wow", "path": "data/eom_wow-*"}]}]}
2024-02-15T14:43:26+00:00
16272207c564a3839f4f34879ec8b620ba08fcf9
# Dataset Card for "paired_arm_risc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celinelee/paired_arm_risc
[ "region:us" ]
2023-03-24T22:53:22+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "c", "dtype": "string"}, {"name": "risc_o0", "dtype": "string"}, {"name": "risc_o1", "dtype": "string"}, {"name": "risc_o2", "dtype": "string"}, {"name": "risc_o3", "dtype": "string"}, {"name": "arm_o0", "dtype": "string"}, {"name": "arm_o1", "dtype": "string"}, {"name": "arm_o2", "dtype": "string"}, {"name": "arm_o3", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2066153, "num_examples": 40}], "download_size": 791924, "dataset_size": 2066153}}
2023-03-25T00:38:49+00:00
a569c18872c89bf74cc302b3be59e9020709122d
# Dataset Card for the ProofLang Corpus ## Dataset Summary The ProofLang Corpus includes 3.7M proofs (558 million words) mechanically extracted from papers that were posted on [arXiv.org](https://arXiv.org) between 1992 and 2020. The focus of this corpus is on proofs, rather than the explanatory text that surrounds them, and more specifically on the *language* used in such proofs. Specific mathematical content is filtered out, resulting in sentences such as `Let MATH be the restriction of MATH to MATH.` This dataset reflects how people prefer to write (non-formalized) proofs, and is also amenable to statistical analyses and experiments with Natural Language Processing (NLP) techniques. We hope it can serve as an aid in the development of language-based proof assistants and proof checkers for professional and educational purposes. ## Dataset Structure There are multiple TSV versions of the data. Primarily, `proofs` divides up the data proof-by-proof, and `sentences` further divides up the same data sentence-by-sentence. The `raw` dataset is a less-cleaned-up version of `proofs`. More usefully, the `tags` dataset gives arXiv subject tags for each paper ID found in the other data files. * The data in `proofs` (and `raw`) consists of a `paper` ID (identifying where the proof was extracted from), and the `proof` as a string. * The data in `sentences` consists of a `paper` ID, and the `sentence` as a string. * The data in `tags` consists of a `paper` ID, and the arXiv subject tags for that paper as a single comma-separated string. Further metadata about papers can be queried from arXiv.org using the paper ID. In particular, each paper `<id>` in the dataset can be accessed online at the URL `https://arxiv.org/abs/<id>` ## Dataset Size * `proofs` is 3,094,779,182 bytes (unzipped) and has 3,681,893 examples. * `sentences` is 3,545,309,822 bytes (unzipped) and has 38,899,132 examples. * `tags` is 7,967,839 bytes (unzipped) and has 328,642 rows. * `raw` is 3,178,997,379 bytes (unzipped) and has 3,681,903 examples. ## Dataset Statistics * The average length of `sentences` is 14.1 words. * The average length of `proofs` is 10.5 sentences. ## Dataset Usage Data can be downloaded as (zipped) TSV files. Accessing the data programmatically from Python is also possible using the `Datasets` library. For example, to print the first 10 proofs: ```python from datasets import load_dataset dataset = load_dataset('proofcheck/prooflang', 'proofs', split='train', streaming=True) for d in dataset.take(10): print(d['paper'], d['proof']) ``` To look at individual sentences from the proofs, ```python dataset = load_dataset('proofcheck/prooflang', 'sentences', split='train', streaming=True) for d in dataset.take(10): print(d['paper'], d['sentence']) ``` To get a comma-separated list of arXiv subject tags for each paper, ```python from datasets import load_dataset dataset = load_dataset('proofcheck/prooflang', 'tags', split='train', streaming=True) for d in dataset.take(10): print(d['paper'], d['tags']) ``` Finally, to look at a version of the proofs with less aggressive cleanup (straight from the LaTeX extraction), ```python dataset = load_dataset('proofcheck/prooflang', 'raw', split='train', streaming=True) for d in dataset.take(10): print(d['paper'], d['proof']) ``` ### Data Splits There is currently no train/test split; all the data is in `train`. ## Dataset Creation We started with the LaTeX source of 1.6M papers that were submitted to [arXiv.org](https://arXiv.org) between 1992 and April 2022.
The proofs were extracted using a Python script simulating parts of LaTeX (including defining and expanding macros). It does no actual typesetting, throws away output not between `\begin{proof}...\end{proof}`, and skips math content. During extraction, * Math-mode formulas (signalled by `$`, `\begin{equation}`, etc.) become `MATH` * `\ref{...}` and variants (`\autoref`, `\subref`, etc.) become `REF` * `\cite{...}` and variants (`\Citet`, `\shortciteNP`, etc.) become `CITE` * Words that appear to be proper names become `NAME` * `\item` becomes `CASE:` We then run a cleanup pass on the extracted proofs that includes * Cleaning up common extraction errors (e.g., due to uninterpreted macros) * Replacing more references by `REF`, e.g., `Theorem 2(a)` or `Postulate (*)` * Replacing more citations with `CITE`, e.g., `Page 47 of CITE` * Replacing more proof-case markers with `CASE:`, e.g., `Case (a).` * Fixing a few common misspellings ## Additional Information This dataset is released under the Creative Commons Attribution 4.0 licence. Copyright for the actual proofs remains with the authors of the papers on [arXiv.org](https://arXiv.org), but these simplified snippets are fair use under US copyright law.
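To give a concrete feel for the cleanup pass described above, here is a minimal sketch of the same kind of regular-expression normalization. The `normalize_proof` helper and its patterns are illustrative assumptions, not the actual extraction script.

```python
import re

def normalize_proof(text):
    # Hypothetical patterns in the spirit of the cleanup pass described above.
    text = re.sub(r"\b(?:Theorem|Lemma|Proposition|Postulate)\s+\S+", "REF", text)  # e.g. "Theorem 2(a)"
    text = re.sub(r"\bPage \d+ of CITE\b", "CITE", text)                            # e.g. "Page 47 of CITE"
    text = re.sub(r"\bCase \([a-z]\)\.", "CASE:", text)                             # e.g. "Case (a)."
    return text

print(normalize_proof("By Theorem 2(a) and Page 47 of CITE, Case (a). the result follows."))
```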
proofcheck/prooflang
[ "task_categories:text-generation", "size_categories:1B<n<10B", "language:en", "license:cc-by-4.0", "region:us" ]
2023-03-24T23:23:54+00:00
{"language": ["en"], "license": "cc-by-4.0", "size_categories": ["1B<n<10B"], "task_categories": ["text-generation"], "pretty_name": "ProofLang Corpus", "dataset_info": [{"config_name": "proofs", "num_bytes": 3197091800, "num_examples": 3681901, "features": [{"name": "fileID", "dtype": "string"}, {"name": "proof", "dtype": "string"}]}, {"config_name": "sentences", "num_bytes": 3736579062, "num_examples": 38899130, "features": [{"name": "fileID", "dtype": "string"}, {"name": "sentence", "dtype": "string"}]}], "download_size": 6933683563, "dataset_size": 6933670862}
2023-06-01T12:35:20+00:00
3c303d42a480794d67f1e1f56085ce57a02b5e1c
# Dataset Card for "10k_test3_xnli_subset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Gwatk/10k_test3_xnli_subset
[ "region:us" ]
2023-03-24T23:50:41+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "entailment", "1": "neutral", "2": "contradiction"}}}}, {"name": "language", "dtype": "string"}, {"name": "choosen_premise", "dtype": "string"}, {"name": "choosen_hypothesis", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2108099, "num_examples": 10000}, {"name": "validation", "num_bytes": 291063, "num_examples": 1500}, {"name": "test", "num_bytes": 384971, "num_examples": 2000}], "download_size": 1867984, "dataset_size": 2784133}}
2023-03-24T23:50:56+00:00
17f82a8e6cf685883ee7ea42ba4efd505f126507
# Dataset Card for "FGVC_Aircraft_test_google_flan_t5_xl_mode_C_A_T_ns_3333" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CVasNLPExperiments/FGVC_Aircraft_test_google_flan_t5_xl_mode_C_A_T_ns_3333
[ "region:us" ]
2023-03-24T23:52:38+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 1095883, "num_examples": 3333}, {"name": "fewshot_1_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 2101157, "num_examples": 3333}, {"name": "fewshot_3_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 4112223, "num_examples": 3333}, {"name": "fewshot_5_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 6122037, "num_examples": 3333}], "download_size": 2520627, "dataset_size": 13431300}}
2023-03-25T01:10:25+00:00
7b71a015233945657d9c3cdceba1116cfa869a19
ssssasdasdasdasdqwd/MONET_Claude_LORA
[ "license:unknown", "region:us" ]
2023-03-25T00:23:16+00:00
{"license": "unknown"}
2023-03-29T14:02:14+00:00
0c4014f85c8cd14561917d6b7141c96008a0df76
# Dataset Card for "GTA V Myths" List of Myths in GTA V, extracted from [Caylus's Channel](https://www.youtube.com/watch?v=bKKOBbWy2sQ&ab_channel=Caylus) [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
taesiri/gta-myths
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "license:mit", "game", "region:us" ]
2023-03-25T01:32:44+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "GTA V Myths", "dataset_info": {"features": [{"name": "Myth", "dtype": "string"}, {"name": "Outcome", "dtype": "string"}, {"name": "Extra", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 28122, "num_examples": 453}], "download_size": 15572, "dataset_size": 28122}, "tags": ["game"]}
2023-03-25T04:46:58+00:00
0dddaeed800e2d28dc5bf63fdad360bc4a858b01
# Dataset Card for "OxfordFlowers_test_google_flan_t5_xxl_mode_C_A_T_ns_6149" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CVasNLPExperiments/OxfordFlowers_test_google_flan_t5_xxl_mode_C_A_T_ns_6149
[ "region:us" ]
2023-03-25T01:35:14+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 2519380, "num_examples": 6149}, {"name": "fewshot_1_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 4887791, "num_examples": 6149}, {"name": "fewshot_3_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 9611045, "num_examples": 6149}, {"name": "fewshot_5_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 14323147, "num_examples": 6149}], "download_size": 4144716, "dataset_size": 31341363}}
2023-03-25T08:00:05+00:00
8b76213286194aa40d6f73ce169a089b148fd3ad
# codealpaca for text2text generation This dataset was downloaded from the [sahil280114/codealpaca](https://github.com/sahil280114/codealpaca) GitHub repo and parsed into text2text format for "generating" instructions. It was downloaded under the **wonderful** Creative Commons Attribution-NonCommercial 4.0 International Public License (see snapshots of the [repo](https://web.archive.org/web/20230325040745/https://github.com/sahil280114/codealpaca) and [data license](https://web.archive.org/web/20230325041314/https://github.com/sahil280114/codealpaca/blob/master/DATA_LICENSE)), so that license applies to this dataset. Note that the `inputs` and `instruction` columns in the original dataset have been aggregated together for text2text generation. Each has a token with either `<instruction>` or `<inputs>` in front of the relevant text, both for model understanding and regex separation later (an illustrative separation snippet appears at the end of this card). ## structure dataset structure: ```python DatasetDict({ train: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 18014 }) test: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 1000 }) validation: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 1002 }) }) ``` ## example The example shows what rows **without** inputs will look like (approximately 60% of the dataset according to the repo). Note the special tokens to identify what is what when the model generates text: `<instruction>` and `<inputs>`: ![example](https://i.imgur.com/bdZM4NW.png) ## token lengths bart ![bart](https://i.imgur.com/81qBl3e.png) t5 ![t5](https://i.imgur.com/63vOqP4.png)
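Since the aggregated `instructions_inputs` column relies on the `<instruction>` and `<inputs>` tokens for later regex separation, here is a minimal separation sketch. The `split_instructions_inputs` helper and the exact whitespace around the tokens are assumptions for illustration, not part of the dataset card.

```python
import re

def split_instructions_inputs(text):
    # Hypothetical helper: recover the instruction and the (possibly empty)
    # inputs from the aggregated column using the special tokens.
    instruction = re.search(r"<instruction>\s*(.*?)\s*(?=<inputs>|$)", text, re.S)
    inputs = re.search(r"<inputs>\s*(.*)", text, re.S)
    return (
        instruction.group(1) if instruction else "",
        inputs.group(1).strip() if inputs else "",
    )

print(split_instructions_inputs("<instruction> Sort the list in ascending order. <inputs> [3, 1, 2]"))
```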
pszemraj/fleece2instructions-codealpaca
[ "task_categories:text2text-generation", "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "license:cc-by-nc-4.0", "instructions", "domain adaptation", "region:us" ]
2023-03-25T02:23:47+00:00
{"language": ["en"], "license": "cc-by-nc-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text2text-generation", "text-generation"], "tags": ["instructions", "domain adaptation"]}
2023-03-25T04:54:47+00:00
f205b8412b2486e801a849a503ecd71169a83567
# fleece2instructions-inputs-alpaca-cleaned This data was downloaded from the [alpaca-lora](https://github.com/tloen/alpaca-lora) repo under the `ODC-BY` license (see [snapshot here](https://web.archive.org/web/20230325034703/https://github.com/tloen/alpaca-lora/blob/main/DATA_LICENSE)) and processed to text2text format. The license under which the data was downloaded from the source applies to this repo. Note that the `inputs` and `instruction` columns in the original dataset have been aggregated together for text2text generation. Each has a token with either `<instruction>` or `<inputs>` in front of the relevant text, both for model understanding and regex separation later. ## Processing details - Drop rows with `output` having fewer than 4 words (via `nltk.word_tokenize`); a sketch of this filter appears at the end of this card - This dataset **does** include both the original `instruction`s and the `inputs` columns, aggregated together into `instructions_inputs` - In the `instructions_inputs` column, the text is delineated via tokens that are either `<instruction>` or `<inputs>` in front of the relevant text, both for model understanding and regex separation later. ## contents ```python DatasetDict({ train: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 43537 }) test: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 2418 }) validation: Dataset({ features: ['instructions_inputs', 'output'], num_rows: 2420 }) }) ``` ## examples ![examples](https://i.imgur.com/07PnnPJ.png) ## token counts t5 ![t5](https://i.imgur.com/FbARV2S.png) bart ![bart](https://i.imgur.com/tGqtBwA.png)
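As a rough illustration of the row filter described above, the sketch below keeps only rows whose `output` has at least 4 word tokens. The `keep_row` helper and the toy rows are assumptions, and the exact NLTK resource name (`punkt`) may differ across NLTK versions.

```python
import nltk
from nltk.tokenize import word_tokenize

nltk.download("punkt", quiet=True)  # tokenizer data used by word_tokenize

def keep_row(row):
    # Keep a row only if its `output` contains at least 4 word tokens,
    # mirroring the processing step described above.
    return len(word_tokenize(row["output"])) >= 4

rows = [{"output": "Yes."}, {"output": "The quick brown fox jumps over the dog."}]
print([r for r in rows if keep_row(r)])
```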
pszemraj/fleece2instructions-inputs-alpaca-cleaned
[ "task_categories:text2text-generation", "size_categories:10K<n<100K", "language:en", "license:odc-by", "instructions", "generate instructions", "instruct", "region:us" ]
2023-03-25T03:21:05+00:00
{"language": ["en"], "license": "odc-by", "size_categories": ["10K<n<100K"], "task_categories": ["text2text-generation"], "tags": ["instructions", "generate instructions", "instruct"]}
2023-03-25T04:51:56+00:00
ae9658f9d0b4f19fb4d1d41987bc45bac2ccd054
# Dataset Card for "UA_speech_noisereduced_CM04_M11" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AravindVadlapudi02/UA_speech_noisereduced_CM04_M11
[ "region:us" ]
2023-03-25T05:19:43+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "healthy control", "1": "pathology"}}}}, {"name": "input_features", "sequence": {"sequence": "float32"}}], "splits": [{"name": "train", "num_bytes": 384132800, "num_examples": 400}, {"name": "test", "num_bytes": 4983162748, "num_examples": 5189}], "download_size": 620573490, "dataset_size": 5367295548}}
2023-03-25T05:23:30+00:00
fc668e881360b880cb32a7105d983570c813124d
# Dataset Card for "UA_speech_noisereduced_CM04_CM12_M04_M11" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AravindVadlapudi02/UA_speech_noisereduced_CM04_CM12_M04_M11
[ "region:us" ]
2023-03-25T05:51:44+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "healthy control", "1": "pathology"}}}}, {"name": "input_features", "sequence": {"sequence": "float32"}}], "splits": [{"name": "train", "num_bytes": 768265600, "num_examples": 800}, {"name": "test", "num_bytes": 4599029948, "num_examples": 4789}], "download_size": 620813146, "dataset_size": 5367295548}}
2023-03-25T05:55:44+00:00
c4dcea05adc71387db69929fc493137a63ca1804
WwJGy/nip.summarization
[ "license:other", "doi:10.57967/hf/0474", "region:us" ]
2023-03-25T05:58:21+00:00
{"license": "other"}
2023-03-25T07:03:59+00:00
fe5be4bbaea232711c771e323268fc6e3ab89a4e
# Dataset Card for "luganda_english_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) The dataset might contain a few mistakes, especially in the one-word translations. Indicators for verbs and nouns (v.i and n.i) may not have been filtered out properly.
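A user-side cleanup sketch for those leftover indicators; the `strip_pos_indicators` helper and the surface forms it matches are assumptions, not part of the dataset.

```python
import re

def strip_pos_indicators(text):
    # Hypothetical cleanup: drop leftover part-of-speech indicators such as
    # "v.i" or "n.i" that may remain in some translations.
    return re.sub(r"\b[vn]\.i\.?\s*", "", text).strip()

print(strip_pos_indicators("v.i to go quickly"))
```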
pkyoyetera/luganda_english_dataset
[ "task_categories:translation", "size_categories:10K<n<100K", "language:en", "language:lg", "license:apache-2.0", "region:us" ]
2023-03-25T06:34:10+00:00
{"language": ["en", "lg"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["translation"], "dataset_info": {"features": [{"name": "English", "dtype": "string"}, {"name": "Luganda", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11844863.620338032, "num_examples": 78238}], "download_size": 7020236, "dataset_size": 11844863.620338032}}
2023-03-25T19:54:14+00:00
6be0bcd787612fb151e2284e62467a9f5636a6d8
# Dataset Card for "NewArOCRDatasetv4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gagan3012/NewArOCRDatasetv4
[ "region:us" ]
2023-03-25T07:40:07+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2775261.0, "num_examples": 599}, {"name": "validation", "num_bytes": 2677639.0, "num_examples": 751}, {"name": "test", "num_bytes": 2638670.0, "num_examples": 752}], "download_size": 4137304, "dataset_size": 8091570.0}}
2023-03-25T07:40:17+00:00
0f6e6ed3f0730bd256d4b44e149d596caf945272
# Dataset Card for "OxfordFlowers_test_google_flan_t5_xl_mode_C_A_T_ns_6149" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CVasNLPExperiments/OxfordFlowers_test_google_flan_t5_xl_mode_C_A_T_ns_6149
[ "region:us" ]
2023-03-25T08:11:49+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 2519586, "num_examples": 6149}, {"name": "fewshot_1_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 4889340, "num_examples": 6149}, {"name": "fewshot_3_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 9611851, "num_examples": 6149}, {"name": "fewshot_5_clip_tags_ViT_L_14_LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 14323006, "num_examples": 6149}], "download_size": 4144345, "dataset_size": 31343783}}
2023-03-25T11:08:26+00:00
d2e2bbaa4a1b2768460b41d95974c2161fc07da3
Rrrrr337/sample
[ "license:unknown", "region:us" ]
2023-03-25T08:14:49+00:00
{"license": "unknown"}
2023-03-25T08:17:32+00:00
a6fe04cf83c91fcf2258735d4fb34595443424bf
acheong08/nsfw_reddit
[ "license:openrail", "region:us" ]
2023-03-25T08:23:53+00:00
{"license": "openrail"}
2023-04-09T12:44:10+00:00
339357664b503c1fd4992ea95b01ab7e24fd6135
weiyun/my_qa
[ "region:us" ]
2023-03-25T08:29:14+00:00
{"dataset_info": [{"config_name": "predict_test", "features": [{"name": "src_txt", "dtype": "string"}, {"name": "tgt_txt", "dtype": "string"}], "splits": [{"name": "test"}, {"name": "train"}, {"name": "validation"}]}]}
2023-03-30T16:09:58+00:00
aa1d187dddba5004b6309f012bcf7aad46997c49
# Dataset Card for "npc-dialogue" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amaydle/npc-dialogue
[ "region:us" ]
2023-03-25T09:11:12+00:00
{"dataset_info": {"features": [{"name": "Name", "dtype": "string"}, {"name": "Biography", "dtype": "string"}, {"name": "Query", "dtype": "string"}, {"name": "Response", "dtype": "string"}, {"name": "Emotion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 737058.9117493472, "num_examples": 1723}, {"name": "test", "num_bytes": 82133.08825065274, "num_examples": 192}], "download_size": 201559, "dataset_size": 819192.0}}
2023-03-25T09:11:29+00:00
9902ce361fa0ee4c694012c73d626463be37d682
# Dataset Card for "UA_speech_noisereduced_CM04_CM05_CM12_M04_M05_M11" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AravindVadlapudi02/UA_speech_noisereduced_CM04_CM05_CM12_M04_M05_M11
[ "region:us" ]
2023-03-25T09:20:33+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "healthy control", "1": "pathology"}}}}, {"name": "input_features", "sequence": {"sequence": "float32"}}], "splits": [{"name": "train", "num_bytes": 1152398400, "num_examples": 1200}, {"name": "test", "num_bytes": 4214897148, "num_examples": 4389}], "download_size": 620605305, "dataset_size": 5367295548}}
2023-03-25T09:21:37+00:00
e3f1b0bd52c6a53ece48c8ef44a36f37418dfc62
arulpraveent/Tamil_2_Eng_dataset
[ "license:apache-2.0", "region:us" ]
2023-03-25T10:35:40+00:00
{"license": "apache-2.0"}
2023-03-25T10:35:40+00:00
c71fd4ee19ce4b9e2c253194b1e45e1ad8b200a2
# Dataset Card for Law Area Prediction ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The dataset contains cases to be classified into the four main areas of law: Public, Civil, Criminal and Social. These can be further classified into sub-areas: ``` "public": ['Tax', 'Urban Planning and Environmental', 'Expropriation', 'Public Administration', 'Other Fiscal'], "civil": ['Rental and Lease', 'Employment Contract', 'Bankruptcy', 'Family', 'Competition and Antitrust', 'Intellectual Property'], 'criminal': ['Substantive Criminal', 'Criminal Procedure'] ``` ### Supported Tasks and Leaderboards Law Area Prediction can be used as a text classification task. ### Languages Switzerland has four official languages, three of which (German, French and Italian) are represented in this dataset. The decisions are written by the judges and clerks in the language of the proceedings. | Language | Subset | Number of Documents| |------------|------------|--------------------| | German | **de** | 127K | | French | **fr** | 156K | | Italian | **it** | 46K | ## Dataset Structure - decision_id: unique identifier for the decision - facts: facts section of the decision - considerations: considerations section of the decision - law_area: label of the decision (main area of law) - law_sub_area: sub area of law of the decision - language: language of the decision - year: year of the decision - court: court of the decision - chamber: chamber of the decision - canton: canton of the decision - region: region of the decision ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits The dataset was split in a date-stratified manner: - Train: 2002-2015 - Validation: 2016-2017 - Test: 2018-2022 ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization The original data are published by the Swiss Federal Supreme Court (https://www.bger.ch) in unprocessed formats (HTML). The documents were downloaded from the Entscheidsuche portal (https://entscheidsuche.ch) in HTML. #### Who are the source language producers? The decisions are written by the judges and clerks in the language of the proceedings. ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information The dataset contains publicly available court decisions from the Swiss Federal Supreme Court.
Personal or sensitive information has been anonymized by the court before publication according to the following guidelines: https://www.bger.ch/home/juridiction/anonymisierungsregeln.html. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information We release the data under CC-BY-4.0 which complies with the court licensing (https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf) © Swiss Federal Supreme Court, 2002-2022 The copyright for the editorial content of this website and the consolidated texts, which is owned by the Swiss Federal Supreme Court, is licensed under the Creative Commons Attribution 4.0 International licence. This means that you can re-use the content provided you acknowledge the source and indicate any changes you have made. Source: https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf ### Citation Information Please cite our [ArXiv-Preprint](https://arxiv.org/abs/2306.09237) ``` @misc{rasiah2023scale, title={SCALE: Scaling up the Complexity for Advanced Language Model Evaluation}, author={Vishvaksenan Rasiah and Ronja Stern and Veton Matoshi and Matthias Stürmer and Ilias Chalkidis and Daniel E. Ho and Joel Niklaus}, year={2023}, eprint={2306.09237}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions
rcds/swiss_law_area_prediction
[ "task_categories:text-classification", "annotations_creators:machine-generated", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:100K<n<1M", "source_datasets:original", "language:de", "language:fr", "language:it", "license:cc-by-sa-4.0", "arxiv:2306.09237", "region:us" ]
2023-03-25T10:51:36+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["expert-generated"], "language": ["de", "fr", "it"], "license": "cc-by-sa-4.0", "multilinguality": ["multilingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["text-classification"], "pretty_name": "Law Area Prediction"}
2023-07-20T06:38:52+00:00
aa6dbc3c7963f1c47f70e9d216edf62c7fd8917e
MillionScope/millionscope
[ "license:mit", "region:us" ]
2023-03-25T11:25:56+00:00
{"license": "mit"}
2023-03-25T11:25:56+00:00
f39db019a94f8dbea48ab30d2bdc090703284559
# Dataset Description - **Project Page:** https://instruction-tuning-with-gpt-4.github.io - **Repo:** https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM - **Paper:** https://arxiv.org/abs/2304.03277 # Dataset Card for "alpaca-zh" This dataset contains roughly 50K self-instruct samples generated with GPT-4 following the Alpaca approach. Dataset from https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM. It is the Chinese dataset from https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM/blob/main/data/alpaca_gpt4_data_zh.json # Usage and License Notices The data is intended and licensed for research use only. The dataset is CC BY NC 4.0 (allowing only non-commercial use) and models trained using the dataset should not be used outside of research purposes. To train a model with the alpaca-zh dataset, see https://github.com/shibing624/textgen # English Dataset [Found here](https://huggingface.co/datasets/c-s-ale/alpaca-gpt4-data) # Citation ``` @article{peng2023gpt4llm, title={Instruction Tuning with GPT-4}, author={Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, Jianfeng Gao}, journal={arXiv preprint arXiv:2304.03277}, year={2023} } ```
shibing624/alpaca-zh
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:zh", "license:cc-by-4.0", "gpt", "alpaca", "fine-tune", "instruct-tune", "instruction", "arxiv:2304.03277", "region:us" ]
2023-03-25T11:37:25+00:00
{"language": ["zh"], "license": "cc-by-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "Instruction Tuning with GPT-4", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32150579, "num_examples": 48818}], "download_size": 35100559, "dataset_size": 32150579}, "tags": ["gpt", "alpaca", "fine-tune", "instruct-tune", "instruction"]}
2023-05-10T05:09:06+00:00
0989639a187afad84c687ebce824505159473a56
# AutoTrain Dataset for project: pegasus-reddit-summarizer ## Dataset Description This dataset has been automatically processed by AutoTrain for project pegasus-reddit-summarizer. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "feat_id": "82n2za", "text": "User who has been working in sales for 30+ years gets a new laptop on Monday. This morning when I get in, my phone is ringing already. I'm not supposed to start for another 20 mins, but I'm nice, so I answer it.\n\n\"This new laptop doesn't have Microsoft on it. Do I need to bring it back in? Just I'm in Scotland, so I'll have to fly down again.\"\n\nEr, yes it does. We went through it when I handed it over, I showed you Outlook, and how Outlook 2016 looks ever so slightly different to Outlook 2010 on your old laptop.\n\n\"Look, it's not there. Every time I click on the button, it just opens the internet. I've emailed my boss from my phone to let him know I'm cancelling all my appointments today, so can you fix it over the VPN or do I need to fly down?\"\n\nSo, I ask him what he's clicking on. \"The blue E. You said the icon was blue now instead of orange. But that just opens the internet, I've already TOLD YOU.\"\n\nI ask him to look along the taskbar for any other blue icons. \"There's a blue and white O. Are you telling me that's it?\" I ask him to confirm that Outlook begins with the letter O, and advise him to try clicking on that icon instead.\n\nSo he clicks on it, and ta-da! Outlook opens. \"Oh for God's sake. This is too confusing. Why did you change the colour anyway? Now I have to re-arrange all my appointments, this is really inconvenient.\"\n\nSorry, I did ring up my mate Bill and ask him to change the colour of Outlook from orange to blue just to confuse you. Luckily I have great power and influence over at Microsoft, so they did me a favour, and I'm now reaping the untold rewards.\n\nGTG, writing an email to his boss to cover my arse...\n", "target": "User receives a new laptop and complains to IT that it doesn't have Microsoft on it. IT informs the user that they had gone through it when handing it over and that the user had simply clicked on the wrong icon. The user complains about the change in icon color and that they now have to rearrange their entire schedule. IT sarcastically apologizes and writes an email to cover themselves." }, { "feat_id": "q4kjoe", "text": "The title implies I was there but really it was just my mom and my sister.\n\nMy sister was craving a cheddar jalapeo bagel so my mom decided to go to a chain caf to get one for her. It was 10 minutes before closing, and they went through the drive thru. My mom orders the cheddar bagel for my sister plus some other things for the rest of the people at home, including coffee cake. The gal at the drive thru window said \"you're lucky, you're getting the last ones of everything you're ordering!\"\n\nMy mom pulls up to the window to pay and receive the food and the drive thru gal (about 19) is crying and apologizing profusely. She says the people in front of my mom STOLE THE FOOD. Mom asked how it happened and the lady said that she had made a mistake and was about to give the car in front the wrong order, but she realized her mistake before handing it over and announced it. The people then REACHED for the bag (it was not handed to them!!!) 
and stole it, apparently saying \"you can't have it back now, it's cross contaminated!\" Then when the lady called for her manager, he was busy, and the people's order wasn't ready yet, so the poor gal just told them to pull up and wait for their food and they did.\n\nMy mom is a really loving person and so she's trying to tell this lady it's okay, she didn't really need the food, she's not mad, etc., and in the meantime the manager comes over to ask what is happening. She tells him and he is shocked. He asked if the car in front was those people, and she said yes. So he starts going out to talk to the people in the car, and at that moment, they step on it and zip out of the parking lot. \n\nSo now those people have not only stolen my mom's order, which were the last items, but they didn't even receive their order! But the good news is that the manager said to my mom that he had been saving a cheddar bagel for himself and that he would give that one to her free of charge. \n\nHave you ever heard of anything like this??? My mom told me this on the phone and I was stunned. I've worked food service before but nothing like this has ever happened!! She thinks the people in the other car had done this maneuver before since the \"cross contamination\" response came out way too quickly. Also I feel so sorry for the lady! She's working in a fucking pandemic getting underpaid and overworked and now has to deal with deranged people!", "target": "A woman went to a chain caf\u00e9 with her daughter to buy a cheddar jalape\u00f1o bagel for her sister. The drive thru attendant announces they are getting the last items of everything. The attendant then reveals that the people in the car in front of them stole their food. The woman's mother attempted to comfort the attendant and the manager offered the woman a cheddar bagel for free. The woman wonders if the \"cross contamination\" defense may have been used by the thieves before." } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "feat_id": "Value(dtype='string', id=None)", "text": "Value(dtype='string', id=None)", "target": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 7200 | | valid | 1800 |
stevied67/autotrain-data-pegasus-reddit-summarizer
[ "task_categories:summarization", "language:en", "region:us" ]
2023-03-25T11:50:33+00:00
{"language": ["en"], "task_categories": ["summarization"]}
2023-03-25T11:51:23+00:00
73cc18da1ea455dcc2fed97eca91e325888415b6
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** https://github.com/mskandalis/daccord-dataset-contradictions - **Paper:** https://aclanthology.org/2023.jeptalnrecital-long.22/ - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The DACCORD dataset is an entirely new collection of 1034 sentence pairs annotated as a binary classification task for automatic detection of contradictions between sentences in French. Each pair of sentences receives a label according to whether or not the two sentences contradict each other. DACCORD currently covers the themes of Russia’s invasion of Ukraine in 2022, the Covid-19 pandemic, and the climate crisis. The sentences of the dataset were extracted from (or based on sentences from) AFP Factuel articles. ### Supported Tasks and Leaderboards The task of automatic detection of contradictions between sentences is a sentence-pair binary classification task. It can be viewed as a task related to both natural language inference task and misinformation detection task. ## Dataset Structure ### Data Fields - `id`: Index number. - `premise`: The translated premise in the target language. - `hypothesis`: The translated premise in the target language. - `label`: The classification label, with possible values 0 (`entailment`), 1 (`neutral`), 2 (`contradiction`). - `label_text`: The classification label, with possible values `entailment` (0), `neutral` (1), `contradiction` (2). - `genre`: a `string` feature . ### Data Splits | theme |contradiction|compatible| |----------------|------------:|---------:| |Russian invasion| 215 | 257 | | Covid-19 | 251 | 199 | | Climate change | 49 | 63 | ## Additional Information ### Citation Information **BibTeX:** ````BibTeX @inproceedings{skandalis-etal-2023-daccord, title = "{DACCORD} : un jeu de donn{\'e}es pour la D{\'e}tection Automatique d{'}{\'e}non{C}{\'e}s {CO}nt{R}a{D}ictoires en fran{\c{c}}ais", author = "Skandalis, Maximos and Moot, Richard and Robillard, Simon", booktitle = "Actes de CORIA-TALN 2023. Actes de la 30e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles (TALN), volume 1 : travaux de recherche originaux -- articles longs", month = "6", year = "2023", address = "Paris, France", publisher = "ATALA", url = "https://aclanthology.org/2023.jeptalnrecital-long.22", pages = "285--297", abstract = "La t{\^a}che de d{\'e}tection automatique de contradictions logiques entre {\'e}nonc{\'e}s en TALN est une t{\^a}che de classification binaire, o{\`u} chaque paire de phrases re{\c{c}}oit une {\'e}tiquette selon que les deux phrases se contredisent ou non. Elle peut {\^e}tre utilis{\'e}e afin de lutter contre la d{\'e}sinformation. Dans cet article, nous pr{\'e}sentons DACCORD, un jeu de donn{\'e}es d{\'e}di{\'e} {\`a} la t{\^a}che de d{\'e}tection automatique de contradictions entre phrases en fran{\c{c}}ais. Le jeu de donn{\'e}es {\'e}labor{\'e} est actuellement compos{\'e} de 1034 paires de phrases. Il couvre les th{\'e}matiques de l{'}invasion de la Russie en Ukraine en 2022, de la pand{\'e}mie de Covid-19 et de la crise climatique. Pour mettre en avant les possibilit{\'e}s de notre jeu de donn{\'e}es, nous {\'e}valuons les performances de certains mod{\`e}les de transformeurs sur lui. Nous constatons qu{'}il constitue pour eux un d{\'e}fi plus {\'e}lev{\'e} que les jeux de donn{\'e}es existants pour le fran{\c{c}}ais, qui sont d{\'e}j{\`a} peu nombreux. 
In NLP, the automatic detection of logical contradictions between statements is a binary classification task, in which a pair of sentences receives a label according to whether or not the two sentences contradict each other. This task has many potential applications, including combating disinformation. In this article, we present DACCORD, a new dataset dedicated to the task of automatically detecting contradictions between sentences in French. The dataset is currently composed of 1034 sentence pairs. It covers the themes of Russia{'}s invasion of Ukraine in 2022, the Covid-19 pandemic, and the climate crisis. To highlight the possibilities of our dataset, we evaluate the performance of some recent Transformer models on it. We conclude that our dataset is considerably more challenging than the few existing datasets for French.", language = "French", } ```` **ACL:** Maximos Skandalis, Richard Moot, and Simon Robillard. 2023. [DACCORD : un jeu de données pour la Détection Automatique d’énonCés COntRaDictoires en français](https://aclanthology.org/2023.jeptalnrecital-long.22). In *Actes de CORIA-TALN 2023. Actes de la 30e Conférence sur le Traitement Automatique des Langues Naturelles (TALN), volume 1 : travaux de recherche originaux -- articles longs*, pages 285–297, Paris, France. ATALA. ### Acknowledgements This work was supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
maximoss/daccord-contradictions
[ "task_categories:text-classification", "task_ids:multi-input-text-classification", "size_categories:1K<n<10K", "language:fr", "license:bsd-2-clause", "region:us" ]
2023-03-25T12:03:33+00:00
{"language": ["fr"], "license": "bsd-2-clause", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["multi-input-text-classification"]}
2024-02-04T12:31:29+00:00
69f64ef5e23c1cf2643f166c6c478fc0a68d166c
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** https://github.com/mskandalis/rte3-french - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This repository contains all manually translated versions of the RTE-3 dataset, plus the original English one. The languages into which the RTE-3 dataset has so far been translated are Italian (2012), German (2013), and French (2023). Unlike in other repositories, both our own French version and the older Italian and German ones are here annotated in 3 classes (entailment, neutral, contradiction), and not in 2 (entailment, not entailment). If you want to use the dataset only in a specific language among those provided here, you can filter data by selecting only the language column value you wish (see the sketch at the end of this card). ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. ## Dataset Structure ### Data Fields - `id`: Index number. - `language`: The language of the concerned pair of sentences. - `premise`: The translated premise in the target language. - `hypothesis`: The translated hypothesis in the target language. - `label`: The classification label, with possible values 0 (`entailment`), 1 (`neutral`), 2 (`contradiction`). - `label_text`: The classification label, with possible values `entailment` (0), `neutral` (1), `contradiction` (2). - `task`: The particular NLP task that the data was drawn from (IE, IR, QA and SUM). - `length`: The length of the text of the pair. ### Data Splits | name |development|test| |-------------|----------:|---:| |all_languages| 3200 |3200| | fr | 800 | 800| | de | 800 | 800| | it | 800 | 800| For French RTE-3: | name |entailment|neutral|contradiction| |-------------|---------:|------:|------------:| | dev | 412 | 299 | 89 | | test | 410 | 318 | 72 | | name |short|long| |-------------|----:|---:| | dev | 665 | 135| | test | 683 | 117| | name | IE| IR| QA|SUM| |-------------|--:|--:|--:|--:| | dev |200|200|200|200| | test |200|200|200|200| ## Additional Information ### Citation Information **BibTeX:** ````BibTeX @inproceedings{giampiccolo-etal-2007-third, title = "The Third {PASCAL} Recognizing Textual Entailment Challenge", author = "Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill", booktitle = "Proceedings of the {ACL}-{PASCAL} Workshop on Textual Entailment and Paraphrasing", month = jun, year = "2007", address = "Prague", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W07-1401", pages = "1--9", } ```` **ACL:** Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. 2007. [The Third PASCAL Recognizing Textual Entailment Challenge](https://aclanthology.org/W07-1401). In *Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing*, pages 1–9, Prague. Association for Computational Linguistics. ### Acknowledgements This work was supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
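A minimal sketch of that per-language filtering with the `datasets` library; the split name (`test`) follows the Data Splits table above and the column name follows the Data Fields list, but both should be checked against the repository.

```python
from datasets import load_dataset

# Keep only the French pairs, as suggested in the Dataset Summary above.
dataset = load_dataset("maximoss/rte3-multi", split="test")
french_only = dataset.filter(lambda example: example["language"] == "fr")
print(len(french_only))
```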
maximoss/rte3-multi
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:multi-input-text-classification", "size_categories:1K<n<10K", "language:fr", "language:en", "language:it", "language:de", "license:cc-by-4.0", "region:us" ]
2023-03-25T12:04:19+00:00
{"language": ["fr", "en", "it", "de"], "license": "cc-by-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"]}
2024-02-04T12:23:56+00:00
02a9c28b1d6e6ddbdc484575d50014119070e7b5
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This repository contains a collection of machine translations of the [LingNLI](https://github.com/Alicia-Parrish/ling_in_loop) dataset into 9 different languages (Bulgarian, Finnish, French, Greek, Italian, Korean, Lithuanian, Portuguese, Spanish). The goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B), which is a classification task (given two sentences, predict one of three labels). It is here formatted in the same manner as the widely used [XNLI](https://huggingface.co/datasets/xnli) dataset for convenience. If you want to use this dataset only in a specific language among those provided here, you can filter data by selecting only the language column value you wish. ### Supported Tasks and Leaderboards This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task. ## Dataset Structure ### Data Fields - `language`: The language in which the pair of sentences is given. - `premise`: The machine translated premise in the target language. - `hypothesis`: The machine translated hypothesis in the target language. - `label`: The classification label, with possible values 0 (`entailment`), 1 (`neutral`), 2 (`contradiction`). - `label_text`: The classification label, with possible values `entailment` (0), `neutral` (1), `contradiction` (2). - `premise_original`: The original premise from the English source dataset. - `hypothesis_original`: The original hypothesis from the English source dataset. ### Data Splits For the whole dataset (LitL and LotS subsets): | language |train|validation| |-------------|----:|---------:| |all_languages|269865| 44037| |el-gr |29985| 4893| |fr |29985| 4893| |it |29985| 4893| |es |29985| 4893| |pt |29985| 4893| |ko |29985| 4893| |fi |29985| 4893| |lt |29985| 4893| |bg |29985| 4893| For LitL subset: | language |train|validation| |-------------|----:|---------:| |all_languages|134955| 21825| |el-gr |14995| 2425| |fr |14995| 2425| |it |14995| 2425| |es |14995| 2425| |pt |14995| 2425| |ko |14995| 2425| |fi |14995| 2425| |lt |14995| 2425| |bg |14995| 2425| For LotS subset: | language |train|validation| |-------------|----:|---------:| |all_languages|134910| 22212| |el-gr |14990| 2468| |fr |14990| 2468| |it |14990| 2468| |es |14990| 2468| |pt |14990| 2468| |ko |14990| 2468| |fi |14990| 2468| |lt |14990| 2468| |bg |14990| 2468| ## Dataset Creation The two subsets of the original dataset were machine translated using the latest neural machine translation [opus-mt-tc-big](https://huggingface.co/models?sort=downloads&search=opus-mt-tc-big) models available for the respective languages. Running the translations lasted from March 25, 2023 until April 8, 2023.
## Additional Information ### Citation Information **BibTeX:** ````BibTeX @inproceedings{parrish-etal-2021-putting-linguist, title = "Does Putting a Linguist in the Loop Improve {NLU} Data Collection?", author = "Parrish, Alicia and Huang, William and Agha, Omar and Lee, Soo-Hwan and Nangia, Nikita and Warstadt, Alexia and Aggarwal, Karmanya and Allaway, Emily and Linzen, Tal and Bowman, Samuel R.", booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021", month = nov, year = "2021", address = "Punta Cana, Dominican Republic", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.findings-emnlp.421", doi = "10.18653/v1/2021.findings-emnlp.421", pages = "4886--4901", abstract = "Many crowdsourced NLP datasets contain systematic artifacts that are identified only after data collection is complete. Earlier identification of these issues should make it easier to create high-quality training and evaluation data. We attempt this by evaluating protocols in which expert linguists work {`}in the loop{'} during data collection to identify and address these issues by adjusting task instructions and incentives. Using natural language inference as a test case, we compare three data collection protocols: (i) a baseline protocol with no linguist involvement, (ii) a linguist-in-the-loop intervention with iteratively-updated constraints on the writing task, and (iii) an extension that adds direct interaction between linguists and crowdworkers via a chatroom. We find that linguist involvement does not lead to increased accuracy on out-of-domain test sets compared to baseline, and adding a chatroom has no effect on the data. Linguist involvement does, however, lead to more challenging evaluation data and higher accuracy on some challenge sets, demonstrating the benefits of integrating expert analysis during data collection.", } @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", abstract = "This paper presents OPUS-MT a project that focuses on the development of free resources and tools for machine translation. The current status is a repository of over 1,000 pre-trained neural machine translation models that are ready to be launched in on-line translation services. For this we also provide open source implementations of web applications that can run efficiently on average desktop hardware with a straightforward setup and installation.", } ```` **ACL:** Alicia Parrish, William Huang, Omar Agha, Soo-Hwan Lee, Nikita Nangia, Alexia Warstadt, Karmanya Aggarwal, Emily Allaway, Tal Linzen, and Samuel R. Bowman. 2021. [Does Putting a Linguist in the Loop Improve NLU Data Collection?](https://aclanthology.org/2021.findings-emnlp.421). In *Findings of the Association for Computational Linguistics: EMNLP 2021*, pages 4886–4901, Punta Cana, Dominican Republic. Association for Computational Linguistics. Jörg Tiedemann and Santhosh Thottingal. 2020. [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61). 
In *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*, pages 479–480, Lisboa, Portugal. European Association for Machine Translation. ### Acknowledgements These translations of the original dataset were done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
maximoss/lingnli-multi-mt
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:multi-input-text-classification", "size_categories:10K<n<100K", "language:el", "language:fr", "language:it", "language:es", "language:pt", "language:ko", "language:fi", "language:lt", "language:bg", "license:bsd-2-clause", "region:us" ]
2023-03-25T12:06:26+00:00
{"language": ["el", "fr", "it", "es", "pt", "ko", "fi", "lt", "bg"], "license": "bsd-2-clause", "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"]}
2024-02-04T12:26:55+00:00
fd2c20ba7d93cb90f7a8a8e1c3266dda260fa06f
Maciel/e-commerce-sample-images
[ "license:apache-2.0", "region:us" ]
2023-03-25T12:11:12+00:00
{"license": "apache-2.0"}
2023-03-25T12:11:49+00:00
ce1dc54de2ec1f5591bb62e7324d7b11733aded7
# Dataset Card for "tokenized-codeparrot-train-verilog" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rohitsuv/tokenized-codeparrot-train-verilog
[ "region:us" ]
2023-03-25T12:11:52+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "ratio_char_token", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 3664280, "num_examples": 5906}], "download_size": 879597, "dataset_size": 3664280}}
2023-03-25T12:11:55+00:00
b0f79ac04f910b42d68bf18c76e3a09b03e1b232
## This is a dataset of Onion news articles: Note - The header and body of each news article are separated by a ' #~# ' token (see the parsing sketch below) - Lines with just the token had no body or no header and can be skipped - Feel free to use the script provided to scrape the latest version; it takes about 30 mins on an i7-6850K
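A minimal parsing sketch for the separator described above (the sample line is made up):

```python
# Split one scraped line on the ' #~# ' separator described above.
line = "Area Man Unsure What To Do #~# Full body text of the article goes here."
header, sep, body = line.partition(" #~# ")
if sep and header and body:  # lines with just the token carry no usable pair and can be skipped
    print(header)
    print(body)
```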
Biddls/Onion_News
[ "task_categories:summarization", "task_categories:text2text-generation", "task_categories:text-generation", "task_categories:text-classification", "language:en", "license:mit", "region:us" ]
2023-03-25T12:50:01+00:00
{"language": ["en"], "license": "mit", "task_categories": ["summarization", "text2text-generation", "text-generation", "text-classification"], "pretty_name": "OnionNewsScrape"}
2023-03-25T12:57:47+00:00
c5588cba87a4917dc094449910d091a76094cebc
# Dataset Card for "oa_tell_a_joke_10000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mikegarts/oa_tell_a_joke_10000
[ "region:us" ]
2023-03-25T13:12:26+00:00
{"dataset_info": {"features": [{"name": "INSTRUCTION", "dtype": "string"}, {"name": "RESPONSE", "dtype": "string"}, {"name": "SOURCE", "dtype": "string"}, {"name": "METADATA", "struct": [{"name": "link", "dtype": "string"}, {"name": "nsfw", "dtype": "bool"}]}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6108828, "num_examples": 10000}], "download_size": 3247379, "dataset_size": 6108828}}
2023-03-25T13:12:29+00:00
48c0d6d7c7801502f9f7adc444c3ce833dae3319
grakky1510/growheads
[ "license:apache-2.0", "region:us" ]
2023-03-25T13:38:30+00:00
{"license": "apache-2.0"}
2023-03-25T17:35:04+00:00
18add89e3b884703ec869a5c6e2bcf1412ee7edc
# Instruction-Finetuning Dataset Collection (Alpaca-CoT)

This repository will continuously collect various instruction-tuning datasets. We standardize different datasets into the same format, which can be directly loaded by the [code](https://github.com/PhoebusSi/alpaca-CoT) of the Alpaca model.

We have also conducted an empirical study on various instruction-tuning datasets based on the Alpaca model, as shown in [https://github.com/PhoebusSi/alpaca-CoT](https://github.com/PhoebusSi/alpaca-CoT).

If you think this dataset collection is helpful to you, please `like` this dataset and `star` our [github project](https://github.com/PhoebusSi/alpaca-CoT)!

You are warmly welcome to provide us with any instruction-tuning datasets (or their sources) we have not yet collected. We will format them uniformly, train the Alpaca model on these datasets and open-source the model checkpoints.

# Contribute

Welcome to join us and become a contributor to this project! If you want to share some datasets, adjust the data into the following format:
```
example.json
[
{"instruction": instruction string,
"input": input string, # (may be empty)
"output": output string}
]
```

The folder should look like this:
```
Alpaca-CoT
|
|----example
|    |
|    |----example.json
|    |
|     ----example_context.json
...
```

Create a new pull request in [Community](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/discussions) and publish your branch when you are ready. We will merge it as soon as we can.

# Data Usage and Resources

## Data Format

All data in this folder is formatted into the same templates, where each sample is as follows:
```
[
{"instruction": instruction string,
"input": input string, # (may be empty)
"output": output string}
]
```
(A minimal loading sketch is given at the end of this card.)

## alpaca

#### alpaca_data.json
> This dataset is published by [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca). It contains 52K English instruction-following samples obtained by [Self-Instruction](https://github.com/yizhongw/self-instruct) techniques.

#### alpaca_data_cleaned.json
> This dataset is obtained [here](https://github.com/tloen/alpaca-lora). It is a revised version of `alpaca_data.json`, obtained by stripping out various tokenization artifacts.

## alpacaGPT4

#### alpaca_gpt4_data.json
> This dataset is published by [Instruction-Tuning-with-GPT-4](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM). It contains 52K English instruction-following samples generated by GPT-4 using Alpaca prompts for fine-tuning LLMs.

#### alpaca_gpt4_data_zh.json
> This dataset is generated by GPT-4 using Chinese prompts translated from Alpaca by ChatGPT.

<!--
## belle_cn

#### belle_data_cn.json
This dataset is published by [BELLE](https://github.com/LianjiaTech/BELLE). It contains 0.5M Chinese instruction-following samples, which is also generated by [Self-Instruction](https://github.com/yizhongw/self-instruct) techniques.

#### belle_data1M_cn.json
This dataset is published by [BELLE](https://github.com/LianjiaTech/BELLE). It contains 1M Chinese instruction-following samples. The data of `belle_data_cn.json` and `belle_data1M_cn.json` are not duplicated.
-->

## Chain-of-Thought

#### CoT_data.json
> This dataset is obtained by formatting the combination of 9 CoT datasets published by [FLAN](https://github.com/google-research/FLAN). It contains 9 CoT tasks involving 74771 samples.

#### CoT_CN_data.json
> This dataset is obtained by translating `CoT_data.json` into Chinese, using Google Translate (en2cn).

#### formatted_cot_data folder
> This folder contains the formatted English data for each CoT dataset.
#### formatted_cot_data folder
> This folder contains the formatted Chinese data for each CoT dataset.

## CodeAlpaca

#### code_alpaca.json
> This dataset is published by [codealpaca](https://github.com/sahil280114/codealpaca). It contains a code generation task involving 20022 samples.

## finance

#### finance_en.json
> This dataset is collected from [here](https://huggingface.co/datasets/gbharti/finance-alpaca). It contains 68912 finance-related instructions in English.

## firefly

#### firefly.json
> This dataset is collected from [here](https://github.com/yangjianxin1/Firefly). It contains 1649398 Chinese instructions across 23 NLP tasks.

## GPT4all

#### gpt4all.json
> This dataset is collected from [here](https://github.com/nomic-ai/gpt4all). It contains 806199 English instructions covering code, story and dialog tasks.

#### gpt4all_without_p3.json
> gpt4all without Bigscience/P3; it contains 437605 samples.

## GPTeacher

#### GPTeacher.json
> This dataset is collected from [here](https://github.com/teknium1/GPTeacher). It contains 29013 English instructions generated by GPT-4: General-Instruct, Roleplay-Instruct, Code-Instruct and Toolformer.

## Guanaco

#### GuanacoDataset.json
> This dataset is collected from [here](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset). It contains 534610 English instructions generated by text-davinci-003 upon the 175 tasks from the Alpaca model, by providing rewrites of seed tasks in different languages and adding new tasks specifically designed for English grammar analysis, natural language understanding, cross-lingual self-awareness, and explicit content recognition.

#### Guanaco_additional_Dataset.json
> A new additional larger dataset for different languages.

## HC3

#### HC3_ChatGPT.json/HC3_Human.json
> This dataset is collected from [here](https://huggingface.co/datasets/Hello-SimpleAI/HC3). It contains 37175 English/Chinese instructions generated by ChatGPT and humans.

#### HC3_ChatGPT_deduplication.json/HC3_Human_deduplication.json
> The HC3 dataset with duplicate instructions removed.

## instinwild

#### instinwild_en.json & instinwild_cn.json
> The two datasets are obtained [here](https://github.com/XueFuzhao/InstructionWild). They contain 52191 English and 51504 Chinese instructions, which are collected from Twitter, where users tend to share their interesting prompts of mostly generation, open QA, and mind-storm types. (Colossal AI used these datasets to train the ColossalChat model.)

## instruct

#### instruct.json
> This dataset is obtained [here](https://huggingface.co/datasets/swype/instruct). It contains 888969 English instructions, with augmentation performed using the advanced NLP tools provided by AllenAI.

## Natural Instructions

#### natural-instructions-1700tasks.zip
> This dataset is obtained [here](https://github.com/allenai/natural-instructions). It contains 5040134 instructions, which are collected from diverse NLP tasks.

## prosocial dialog

#### natural-instructions-1700tasks.zip
> This dataset is obtained [here](https://huggingface.co/datasets/allenai/prosocial-dialog). It contains 165681 English instructions, produced by GPT-3 rewriting questions combined with human feedback.

## xP3

#### natural-instructions-1700tasks.zip
> This dataset is obtained [here](https://huggingface.co/datasets/bigscience/xP3).
It contains 78883588 instructions, which are collected from prompts & datasets across 46 languages & 16 NLP tasks.

## Chinese-instruction-collection
> All datasets of the Chinese instruction collection.

## combination

#### alcapa_plus_belle_data.json
> This dataset is the combination of English `alpaca_data.json` and Chinese `belle_data_cn.json`.

#### alcapa_plus_cot_data.json
> This dataset is the combination of English `alpaca_data.json` and CoT `CoT_data.json`.

#### alcapa_plus_belle_cot_data.json
> This dataset is the combination of English `alpaca_data.json`, Chinese `belle_data_cn.json` and CoT `CoT_data.json`.

## Citation

Please cite the repo if you use the data collection, code, and experimental findings in this repo.
```
@misc{alpaca-cot,
  author = {Qingyi Si and Zheng Lin},
  school = {Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China},
  title = {Alpaca-CoT: An Instruction Fine-Tuning Platform with Instruction Data Collection and Unified Large Language Models Interface},
  year = {2023},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/PhoebusSi/alpaca-CoT}},
}
```
Please also cite the original Stanford Alpaca, BELLE and FLAN papers.
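As a quick illustration of the unified format above, a minimal loading sketch with the `datasets` library (the local path `alpaca_data.json` is just an example; point it at whichever formatted file you downloaded):

```python
from datasets import load_dataset

# Every file in this collection follows the same instruction/input/output schema,
# so it can be loaded as a plain JSON dataset. The path below is an example.
ds = load_dataset("json", data_files="alpaca_data.json", split="train")

example = ds[0]
prompt = example["instruction"]
if example["input"]:          # "input" may be empty
    prompt += "\n" + example["input"]
print(prompt, "->", example["output"])
```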
QingyiSi/Alpaca-CoT
[ "language:en", "language:zh", "language:ml", "license:apache-2.0", "Instruction", "Cot", "region:us" ]
2023-03-25T14:58:30+00:00
{"language": ["en", "zh", "ml"], "license": "apache-2.0", "tags": ["Instruction", "Cot"], "datasets": ["dataset1", "dataset2"]}
2023-09-14T07:52:10+00:00
7cd356ecf220a6808de6782c6e2eba8a33d7d743
# Dataset information Dataset from the [French translation](https://lbourdois.github.io/cours-dl-nyu/) by Loïck Bourdois of the [course](https://atcold.github.io/pytorch-Deep-Learning/) by Yann Le Cun and Alfredo Canziani from the NYU. More than 3000 parallel data were created. The whole corpus has been manually checked to make sure of the good alignment of the data. Note that the English data comes from several different people (about 190, see the acknowledgement section below). This has an impact on the homogeneity of the texts (some write in the past tense, others in the present tense; the abbreviations used are not always the same; some write short sentences, while others write sentences of up to 5 or 6 lines, etc.). The translation into French was done by a single person in order to alleviate the problems mentioned above and to propose a homogeneous translation. This means that the corpus of data does not correspond to word by word translations but rather to concept translations. In this logic, the data were not aligned at the sentence level but rather at the paragraph level. The translation choices made are explained [here](https://lbourdois.github.io/cours-dl-nyu/). # Usage ``` from datasets import load_dataset dataset = load_dataset("lbourdois/en-fr-nyu-dl-course-corpus", sep=";") ``` # Acknowledgments A huge thank you to the more than 190 students who shared their course notes (in chronological order of contribution): Yunya Wang, SunJoo Park, Mark Estudillo, Justin Mae, Marina Zavalina, Peeyush Jain, Adrian Pearl, Davida Kollmar, Derek Yen, Tony Xu, Ben Stadnick, Prasanthi Gurumurthy, Amartya Prasad, Dongning Fang, Yuxin Tang, Sahana Upadhya, Micaela Flores, Sheetal Laad, Brina Seidel, Aishwarya Rajan, Jiuhong Xiao, Trieu Trinh, Elliot Silva, Calliea Pan, Chris Ick, Soham Tamba, Ziyu Lei, Hengyu Tang, Ashwin Bhola, Nyutian Long, Linfeng Zhang, Poornima Haridas, Yuchi Ge, Anshan He, Shuting Gu, Weiyang Wen, Vaibhav Gupta, Himani Shah, Gowri Addepalli, Lakshmi Addepalli, Guido Petri, Haoyue Ping, Chinmay Singhal, Divya Juneja, Leyi Zhu, Siqi Wang, Tao Wang, Anqi Zhang, Shiqing Li, Chenqin Yang, Yakun Wang, Jimin Tan, Jiayao Liu, Jialing Xu, Zhengyang Bian, Christina Dominguez, Zhengyuan Ding, Biao Huang, Lin Jiang, Nhung Le, Karanbir Singh Chahal,Meiyi He, Alexander Gao, Weicheng Zhu, Ravi Choudhary,B V Nithish Addepalli, Syed Rahman,Jiayi Du, Xinmeng Li, Atul Gandhi, Li Jiang, Xiao Li, Vishwaesh Rajiv, Wenjun Qu, Xulai Jiang, Shuya Zhao, Henry Steinitz, Rutvi Malaviya, Aathira Manoj, Richard Pang, Aja Klevs, Hsin-Rung Chou, Mrinal Jain, Kelly Sooch, Anthony Tse, Arushi Himatsingka, Eric Kosgey, Bofei Zhang, Andrew Hopen, Maxwell Goldstein, Zeping Zhan, William Huang, Kunal Gadkar, Gaomin Wu, Lin Ye, Aniket Bhatnagar, Dhruv Goyal, Cole Smith, Nikhil Supekar, Zhonghui Hu, Yuqing Wang, Alfred Ajay Aureate Rajakumar, Param Shah, Muyang Jin, Jianzhi Li, Jing Qian, Zeming Lin, Haochen Wang, Eunkyung An, Ying Jin, Ningyuan Huang, Charles Brillo-Sonnino, Shizhan Gong, Natalie Frank, Yunan Hu, Anuj Menta, Dipika Rajesh, Vikas Patidar, Mohith Damarapati, Jiayu Qiu, Yuhong Zhu, Lyuang Fu, Ian Leefmans, Trevor Mitchell, Andrii Dobroshynskyi, Shreyas Chandrakaladharan, Ben Wolfson, Francesca Guiso, Annika Brundyn, Noah Kasmanoff, Luke Martin, Bilal Munawar, Alexander Bienstock, Can Cui, Shaoling Chen, Neil Menghani, Tejaishwarya Gagadam, Joshua Meisel, Jatin Khilnani, Go Inoue, Muhammad Osama Khan, Muhammad Shujaat Mirza, Muhammad Muneeb Afzal, Junrong Zha, Muge Chen, Rishabh Yadav, Zhuocheng 
Xu, Yada Pruksachatkun, Ananya Harsh Jha, Joseph Morag, Dan Jefferys-White, Brian Kelly, Karl Otness, Xiaoyi Zhang, Shreyas Chandrakaladharan, Chady Raach, Yilang Hao, Binfeng Xu, Ebrahim Rasromani, Mars Wei-Lun Huang, Anu-Ujin Gerelt-Od, Sunidhi Gupta, Bichen Kou, Binfeng Xu, Rajashekar Vasantha, Wenhao Li, Vidit Bhargava, Monika Dagar, Nandhitha Raghuram, Xinyi Zhao, Vasudev Awatramani, Sumit Mamtani, Srishti Bhargava, Jude Naveen Raj Ilango, Duc Anh Phi, Krishna Karthik Reddy Jonnala, Rahul Ahuja, jingshuai jiang, Cal Peyser, Kevin Chang, Gyanesh Gupta, Abed Qaddoumi, Fanzeng Xia, Rohith Mukku, Angela Teng, Joanna Jin, Yang Zhou, Daniel Yao and Sai Charitha Akula.

# Citation

```
@misc{nyudlcourseinfrench,
  author = {Canziani, Alfredo and LeCun, Yann and Bourdois, Loïck},
  title = {Cours d’apprentissage profond de la New York University},
  howpublished = "\url{https://lbourdois.github.io/cours-dl-nyu/}",
  year = {2023}
}
```

# License

[cc-by-4.0](https://creativecommons.org/licenses/by/4.0/deed.en)
lbourdois/en-fr-nyu-dl-course-corpus
[ "task_categories:translation", "size_categories:1K<n<10K", "language:fr", "language:en", "license:cc-by-4.0", "region:us" ]
2023-03-25T16:15:24+00:00
{"language": ["fr", "en"], "license": "cc-by-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["translation"], "configs": [{"config_name": "semicolon", "data_files": "en-fr-nyu-dl-course-corpus.csv", "sep": ";"}]}
2023-11-11T14:09:15+00:00
faa032274636e1f1d0cb3ff1a09eb68d7b96b86e
# Dataset Card for "pandas-documentation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pacovaldez/pandas-documentation
[ "region:us" ]
2023-03-25T18:05:07+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "path", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11630760, "num_examples": 4729}, {"name": "validate", "num_bytes": 4424483, "num_examples": 1577}, {"name": "test", "num_bytes": 4048249, "num_examples": 1577}], "download_size": 6979790, "dataset_size": 20103492}}
2023-04-07T19:55:11+00:00
dae1ea6b3b0e0a6f2cd9ea47500a9605d4fdca8c
# Dataset Card for "speech_chatgpt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zion84006/speech_chatgpt
[ "region:us" ]
2023-03-25T18:25:40+00:00
{"dataset_info": {"features": [{"name": "file_id", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "transcription", "dtype": "string"}, {"name": "src_encodec_0", "sequence": "int64"}, {"name": "src_encodec_1", "sequence": "int64"}, {"name": "src_encodec_2", "sequence": "int64"}, {"name": "src_encodec_3", "sequence": "int64"}, {"name": "src_encodec_4", "sequence": "int64"}, {"name": "src_encodec_5", "sequence": "int64"}, {"name": "src_encodec_6", "sequence": "int64"}, {"name": "src_encodec_7", "sequence": "int64"}, {"name": "tgt_encodec_0", "sequence": "int64"}, {"name": "tgt_encodec_1", "sequence": "int64"}, {"name": "tgt_encodec_2", "sequence": "int64"}, {"name": "tgt_encodec_3", "sequence": "int64"}, {"name": "tgt_encodec_4", "sequence": "int64"}, {"name": "tgt_encodec_5", "sequence": "int64"}, {"name": "tgt_encodec_6", "sequence": "int64"}, {"name": "tgt_encodec_7", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 206456352, "num_examples": 5311}, {"name": "validation", "num_bytes": 5602794, "num_examples": 152}, {"name": "test", "num_bytes": 8155880, "num_examples": 152}], "download_size": 34937248, "dataset_size": 220215026}}
2023-05-28T09:30:10+00:00
e7f6f17c8cf040c484c56ddc5d6b2b4596e5e9df
# Dataset Card for "bookcorpus_stage1_SV_100000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MartinKu/bookcorpus_stage1_SV_100000
[ "region:us" ]
2023-03-25T19:55:47+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3107159, "num_examples": 163335}], "download_size": 1947741, "dataset_size": 3107159}}
2023-03-25T20:21:12+00:00
e23395284a75450a753cc26611a2d84b635c53a2
# Dataset Card for "bookcorpus_stage1_OC_100000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MartinKu/bookcorpus_stage1_OC_100000
[ "region:us" ]
2023-03-25T19:56:41+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4445625, "num_examples": 149929}], "download_size": 2970506, "dataset_size": 4445625}}
2023-03-25T20:21:14+00:00
bc14e85d80c1ba9fedb7b3e8164d1800a805bf57
<!-- Thank you for your interest in the VISION Datasets! --> # Dataset Card for VISION Datasets ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Dataset Information](#dataset-information) - [Datasets Overview](#datasets-overview) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Building Dataset Splits](#building-dataset-splits) - [Additional Information](#additional-information) - [License](#license) - [Disclaimer](#disclaimer) - [Citation](#citation) ## Dataset Description - **Homepage:** [VISION homepage](https://vision-based-industrial-inspection.github.io/cvpr-2023/) - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** [VISION email](mailto:[email protected]) ### Dataset Summary The **VISION Datasets** are a collection of 14 industrial inspection datasets, designed to explore the unique challenges of vision-based industrial inspection. These datasets are carefully curated from [Roboflow](https://roboflow.com) and cover a wide range of manufacturing processes, materials, and industries. To further enable precise defect segmentation, we annotate each dataset with polygon labels based on the provided bounding box labels. ### Supported Tasks and Leaderboards We currently host two prized challenges on the VISION Datasets: - The VISION [Track 1 Challenge](https://bit.ly/VISION_Track_1) aims to evaluate solutions that can effectively learn with limited labeled data in combination with unlabeled data across diverse images from different industries and contexts. - The VISION [Track 2 Challenge](https://bit.ly/VISION_Track_2) aims to challenge algorithmic solutions to generate synthetic data that will help improve model performance given only limited labeled data. Please check out our [workshop website](https://vision-based-industrial-inspection.github.io/cvpr-2023/) and competition pages for further details. ## Dataset Information ### Datasets Overview The VISION Datasets consist of the following 14 individual datasets: - Cable - Capacitor - Casting - Console - Cylinder - Electronics - Groove - Hemisphere - Lens - PCB_1 - PCB_2 - Ring - Screw - Wood ### Data Splits Each dataset contains three folders: train, val, and inference. The train and val folders contain the training and validation data, respectively. The inference folder contains both the testing data and the unused data for generating submissions to our evaluation platform. The _annotations.coco.json files contain the [COCO format](https://cocodataset.org/#format-data) annotations for each dataset. We will release more information on the testing data as the competitions conclude. Each dataset has the following structure: ```yaml ├── dataset_name/ │ ├── train/ │ │ ├── _annotations.coco.json # COCO format annotation │ │ ├── 000001.png # Images │ │ ├── 000002.png │ │ ├── ... │ ├── val/ │ │ ├── _annotations.coco.json # COCO format annotation │ │ ├── xxxxxx.png # Images │ │ ├── ... │ ├── inference/ │ │ ├── _annotations.coco.json # COCO format annotation with unlabeled image list only │ │ ├── xxxxxx.png # Images │ │ ├── ... ``` ## Dataset Creation ### Curation Rationale Our primary goal is to encourage further alignment between academic research and production practices in vision-based industrial inspection. 
Due to both the consideration to remain faithful to naturally existing label challenges and the difficulty in distinguishing between unintentional labeling oversight and domain-specific judgments without the manufacturers' specification sheets, we refrain from modifying original defect decisions. To enable precise defect detection even with existing label limitations, we provide refined segmentation masks for each defect indicated by the original bounding boxes. ### Building Dataset Splits To ensure the benchmark can faithfully reflect the performance of algorithms, we need to minimize leakage across train, validation, and testing data. Due to the crowd-sourced nature, the original dataset splits are not always guaranteed to be free of leakage. As a result, we design a process to resplit the datasets with specific considerations for industrial defect detection. Given distinct characteristics of defect detection datasets, including but not limited to: - Stark contrast between large image size and small defect size - Highly aligned non-defective images may seem to be duplicates, but are necessary to represent natural distribution and variation to properly assess the false detection rate. Naively deduping with image-level embedding or hash would easily drown out small defects and regard distinct non-defective images as duplicates. Therefore, we first only deduplicate images with identical byte contents and set the images without defect annotation aside. For images with defect annotations, we want to reduce leakage at the defect level. We train a self-supervised similarity model on the defect regions and model the similarity between two images as the maximum pairwise similarity between the defects on each image. Finally, we perform connected component analysis on the image similarity graph and randomly assign connected components to dataset splits in a stratified manner. In order to discourage manual exploitation during the data competition, the discarded images are provided alongside the test split data as the inference data for participants to generate their submissions. However, the testing performance is evaluated exclusively based on the test split data. Further details will be provided in a paper to be released soon. ## Additional Information ### License The provided polygon annotations are licensed under [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) License. All the original dataset assets are under the original dataset licenses. ### Disclaimer While we believe the terms of the original datasets permit our use and publication herein, we do not make any representations as to the license terms of the original dataset. Please follow the license terms of such datasets if you would like to use them. ### Citation If you apply this dataset to any project and research, please cite our repo: ``` @article{vision-datasets, title = {VISION Datasets: A Benchmark for Vision-based InduStrial InspectiON}, author = {Haoping Bai, Shancong Mou, Tatiana Likhomanenko, Ramazan Gokberk Cinbis, Oncel Tuzel, Ping Huang, Jiulong Shan, Jianjun Shi, Meng Cao}, journal = {arXiv preprint arXiv:2306.07890}, year = {2023}, } ```
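As a convenience, a minimal sketch of reading one split's annotations with `pycocotools` (the path below is a placeholder for a local copy of one dataset; adjust the dataset name and split as needed):

```python
from pycocotools.coco import COCO

# Placeholder path to a local copy of one dataset's training split.
ann_file = "Cable/train/_annotations.coco.json"
coco = COCO(ann_file)

for img_id in coco.getImgIds()[:5]:
    img = coco.loadImgs(img_id)[0]
    anns = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
    print(img["file_name"], "->", len(anns), "defect annotation(s)")
```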
VISION-Workshop/VISION-Datasets
[ "task_categories:object-detection", "size_categories:1K<n<10K", "license:cc-by-nc-4.0", "Defect Detection", "Anomaly Detection", "Instance Segmentation", "region:us" ]
2023-03-25T20:26:45+00:00
{"license": "cc-by-nc-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["object-detection"], "pretty_name": "VISION Datasets", "tags": ["Defect Detection", "Anomaly Detection", "Instance Segmentation"], "extra_gated_fields": {"Name": "text", "Affiliation": "text", "Email": "text", "I confirm my understanding and acceptance of the license and disclaimer presented to me": "checkbox"}}
2023-06-15T15:14:25+00:00
61b57b64527b52f2422d776b2b5921fd7f1dc25f
niv-al/instruct_mini_sq
[ "license:openrail", "region:us" ]
2023-03-25T20:47:27+00:00
{"license": "openrail"}
2023-03-25T20:53:03+00:00
a16cff2086179bd19251316faf6e33553352f585
saintbyte/real_world
[ "license:openrail", "region:us" ]
2023-03-25T20:51:41+00:00
{"license": "openrail"}
2023-03-25T20:51:41+00:00
e1d386f49f1e8f51b7fcf50574faf5c87bb09d15
# Dataset Card for "cats_of_cifar10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Aadigoel/cats_of_cifar10
[ "region:us" ]
2023-03-25T21:00:44+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 36500000, "num_examples": 5000}], "download_size": 15514466, "dataset_size": 36500000}}
2023-03-25T21:04:44+00:00
442b3e3892c43669ff2d1ab2821e7f65457cbd85
# Dataset Card for Dataset Name

## Dataset Description

Collection of functional programming languages from GitHub.

- **Point of Contact:** dhuck

### Dataset Summary

This dataset is a collection of code examples of functional programming languages for code generation tasks. It was collected over a week-long period in March 2023 as part of a project in program synthesis.

## Dataset Structure

### Data Instances

```
{
  'id': str
  'repository': str
  'filename': str
  'license': str or Empty
  'language': str
  'content': str
}
```

### Data Fields

* `id`: SHA256 hash of the content field. This ID scheme ensures that duplicate code examples via forks or other duplications are removed from the dataset.
* `repository`: The repository that the file was pulled from. This can be used for any attribution or to check updated licensing issues for the code example.
* `filename`: Filename of the code example from within the repository.
* `license`: Licensing information of the repository. This can be empty and further work is likely necessary to parse licensing information from individual files.
* `language`: Programming language of the file. For example, Haskell, Clojure, Lisp, etc...
* `content`: Source code of the file. This is the full text of the source with some cleaning as described in the Curation section below. While many examples are short, others can be extremely long. This field will likely require preprocessing for end tasks.

### Data Splits

More information to be provided at a later date. There are 157,218 test examples and 628,869 training examples. The split was created using `scikit-learn`'s `train_test_split` function.

## Dataset Creation

### Curation Rationale

This dataset was put together for Program Synthesis tasks. The majority of available datasets consist of imperative programming languages, while the program synthesis community has a rich history of methods using functional languages. This dataset aims to unify the two approaches by making a large training corpus of functional languages available to researchers.

### Source Data

#### Initial Data Collection and Normalization

Code examples were collected in a similar manner to other existing programming language datasets. Each example was pulled from public repositories on GitHub over a week in March 2023. I performed this task by searching common file extensions of the target languages (Clojure, Elixir, Haskell, Lisp, OCaml, Racket and Scheme). The full source is included for each coding example, so padding or truncation will be necessary for any training tasks.

Significant effort was made to remove any personal information from each coding example. For each code example, I removed any email addresses or websites using simple regex pattern matching. spaCy NER was used to identify proper names in the comments only. Any token which spanned a name was simply replaced with the token `PERSON`, while email addresses and websites were dropped from each comment. Organizations and other information were left intact.

#### Who are the source language producers?

Each example contains the repository the code originated from, identifying the source of each example.

### Personal and Sensitive Information

While great care was taken to remove proper names, email addresses, and websites, there may exist examples where pattern matching did not work. While I used the best spaCy models available, I did witness false negatives on other tasks on other datasets.
To ensure no personal information makes it into training data, it is advisable to remove all comments if the training task does not require them. I made several PRs to the `comment_parser` Python library to support the languages in this dataset. My version of the parsing library can be found at [https://github.com/d-huck/comment_parser](https://github.com/d-huck/comment_parser).

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

While code itself may not contain bias, programmers can use offensive, racist, homophobic, transphobic, misogynistic, etc. words for variable names. Further updates to this dataset library will investigate and address these issues. Comments in the code examples could also contain hateful speech. Models trained on this dataset may need additional training on toxicity to remove these tendencies from the output.

### Other Known Limitations

The code present in this dataset has not been checked for quality in any way. It is possible and probable that several of the coding examples are of poor quality and do not actually compile or run in their target language. Furthermore, there exists a chance that some examples are not in the language they claim to be, since GitHub search matching depends only on the file extension and not the actual contents of any file.
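A minimal loading sketch with the `datasets` library (streamed, so the multi-GB corpus is not downloaded up front; the field names follow the data-instance example above, so check `ds.features` if they differ in practice):

```python
from datasets import load_dataset

# Stream the train split rather than downloading the full corpus.
ds = load_dataset("dhuck/functional_code", split="train", streaming=True)

for record in ds.take(3):
    # Field names are taken from the data-instance example in this card.
    print(record["repository"], record["language"], len(record["content"]), "chars")
```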
dhuck/functional_code
[ "task_categories:text-generation", "task_categories:feature-extraction", "size_categories:100K<n<1M", "license:afl-3.0", "Program Synthesis", "code", "region:us" ]
2023-03-25T21:13:43+00:00
{"license": "afl-3.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "feature-extraction"], "pretty_name": "Functional Code", "tags": ["Program Synthesis", "code"], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "repository", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "license", "dtype": "null"}, {"name": "download_url", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "comments", "dtype": "string"}, {"name": "code", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7561888852, "num_examples": 611738}, {"name": "test", "num_bytes": 1876266819, "num_examples": 152935}], "download_size": 3643404015, "dataset_size": 9438155671}}
2023-04-05T14:51:51+00:00
a8fe1aa72a0cfaeecc35e23050f9d8cbdc2e31db
# Dataset Card for "tib_slides_wip" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gigant/tib_slides_wip
[ "region:us" ]
2023-03-26T00:20:40+00:00
{"dataset_info": {"features": [{"name": "Image", "dtype": "image"}, {"name": "file_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 161850916866.84, "num_examples": 595458}], "download_size": 29396407498, "dataset_size": 161850916866.84}}
2023-03-26T15:22:49+00:00
d9fc7beb99282f69fd8a968de4dd41b0bf4d4160
AIrtisian/useless-data
[ "license:unknown", "region:us" ]
2023-03-26T00:05:02+00:00
{"license": "unknown"}
2023-03-26T00:08:11+00:00
d8ef54e4e4fa8f1b0880159e70714b2a2459cc7b
AIrtisian/testcsv
[ "license:other", "region:us" ]
2023-03-26T00:33:44+00:00
{"license": "other"}
2023-03-26T00:36:10+00:00
22d878d352742262a1fa5660b0ee7e04ec6d2a3b
# Dataset Card for "reward_model_anthropic_8" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/reward_model_anthropic_8
[ "region:us" ]
2023-03-26T00:38:47+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "output", "sequence": "string"}, {"name": "toxicity", "sequence": "float64"}, {"name": "severe_toxicity", "sequence": "float64"}, {"name": "obscene", "sequence": "float64"}, {"name": "identity_attack", "sequence": "float64"}, {"name": "insult", "sequence": "float64"}, {"name": "threat", "sequence": "float64"}, {"name": "sexual_explicit", "sequence": "float64"}, {"name": "mean_toxity_value", "dtype": "float64"}, {"name": "max_toxity_value", "dtype": "float64"}, {"name": "min_toxity_value", "dtype": "float64"}, {"name": "sd_toxity_value", "dtype": "float64"}, {"name": "median_toxity_value", "dtype": "float64"}, {"name": "median_output", "dtype": "string"}, {"name": "toxic", "dtype": "bool"}, {"name": "regard", "list": {"list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}}, {"name": "regard_neutral", "dtype": "float64"}, {"name": "regard_positive", "dtype": "float64"}, {"name": "regard_other", "dtype": "float64"}, {"name": "regard_negative", "dtype": "float64"}, {"name": "bias_matches", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 25267747, "num_examples": 8552}], "download_size": 15240877, "dataset_size": 25267747}}
2023-03-26T00:39:23+00:00
eb82324077493f04bbdfeaa0fd97c26ef8bdeb1b
JetQin/seven-wonders
[ "size_categories:100K<n<1M", "language:en", "seven-wonders", "region:us" ]
2023-03-26T02:03:56+00:00
{"language": ["en"], "size_categories": ["100K<n<1M"], "tags": ["seven-wonders"]}
2023-03-26T02:05:30+00:00
0f581e5a02b86246d7d9600168ab5d702d81ba97
# Dataset Card for "wikilibros_artesculinarias_recetas" ## Dataset Description Subconjunto de recetas de cocina extraidas de [Artes Culinarias](https://es.wikibooks.org/wiki/Artes_culinarias/Recetas)
ID3/wikilibros_artesculinarias_recetas
[ "language:es", "license:cc-by-sa-3.0", "region:us" ]
2023-03-26T02:25:48+00:00
{"language": ["es"], "license": "cc-by-sa-3.0", "pretty_name": "Recetas de cocina Wikilibros", "dataset_info": {"features": [{"name": "comensales", "dtype": "string"}, {"name": "tiempo", "dtype": "string"}, {"name": "dificultad", "dtype": "string"}, {"name": "ingredientes", "sequence": "string"}, {"name": "procedimiento", "sequence": "string"}, {"name": "titulo", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 727791, "num_examples": 753}, {"name": "validation", "num_bytes": 78214, "num_examples": 84}], "download_size": 444915, "dataset_size": 806005}}
2023-03-26T02:33:17+00:00
835cedb38ff5a33f98c9cffd0a08733179e10816
# Speeches This is a dataset of English speeches from notable people. [More to come!]
WillieCubed/speeches
[ "task_categories:text-classification", "task_categories:text-generation", "task_categories:summarization", "language:en", "license:cc0-1.0", "region:us" ]
2023-03-26T03:12:47+00:00
{"language": ["en"], "license": "cc0-1.0", "task_categories": ["text-classification", "text-generation", "summarization"], "pretty_name": "Speeches"}
2023-03-26T03:15:56+00:00
fbd263443a5c717ebe8427d96c28655e5e23fc36
# Dataset Card for "clothing" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JamesNetflix/clothing
[ "region:us" ]
2023-03-26T05:00:27+00:00
{"dataset_info": {"features": [{"name": "split", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 4862406.0, "num_examples": 44}], "download_size": 4863831, "dataset_size": 4862406.0}}
2023-03-26T05:05:13+00:00
f201be3abc7fe747a96ee9c069c9e367504ae36b
# Dataset Card for "clothing-256" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JamesNetflix/clothing-256
[ "region:us" ]
2023-03-26T05:10:23+00:00
{"dataset_info": {"features": [{"name": "split", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 510209201.5, "num_examples": 5108}], "download_size": 510152513, "dataset_size": 510209201.5}}
2023-03-26T05:30:11+00:00
ef509a8b4bfd46b76530ad329f81586faf904aad
# Dataset Card for Indian Foods Dataset

## Dataset Description

- **Homepage:** https://www.kaggle.com/datasets/anshulmehtakaggl/themassiveindianfooddataset
- **Repository:** https://www.kaggle.com/datasets/anshulmehtakaggl/themassiveindianfooddataset
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** https://www.kaggle.com/anshulmehtakaggl

### Dataset Summary

This is a multi-category (multi-class classification) Indian food dataset showcasing [The-massive-Indian-Food-Dataset](https://www.kaggle.com/datasets/anshulmehtakaggl/themassiveindianfooddataset). This card has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

English

## Dataset Structure

```json
{
  "image": "Image(decode=True, id=None)",
  "target": "ClassLabel(names=['biryani', 'cholebhature', 'dabeli', 'dal', 'dhokla', 'dosa', 'jalebi', 'kathiroll', 'kofta', 'naan', 'pakora', 'paneer', 'panipuri', 'pavbhaji', 'vadapav'], id=None)"
}
```

### Dataset Splits

This dataset is split into a train and test split. The split sizes are as follows:

| Split name   | Num samples         |
| ------------ | ------------------- |
| train        | 3809                |
| test         | 961                 |

### Data Instances

Each instance is a picture of the Indian food item, along with the category it belongs to.

#### Initial Data Collection and Normalization

Collected by scraping data from Google Images and leveraging some JS functions. All the images are resized to (300,300) to maintain size uniformity.

### Dataset Curators

[Anshul Mehta](https://www.kaggle.com/anshulmehtakaggl)

### Licensing Information

[CC0: Public Domain](https://creativecommons.org/publicdomain/zero/1.0/)

### Citation Information

[The Massive Indian Foods Dataset](https://www.kaggle.com/datasets/anshulmehtakaggl/themassiveindianfooddataset)
bharat-raghunathan/indian-foods-dataset
[ "task_categories:image-classification", "task_categories:text-to-image", "size_categories:1K<n<10K", "language:en", "license:cc0-1.0", "region:us" ]
2023-03-26T05:26:43+00:00
{"language": ["en"], "license": "cc0-1.0", "size_categories": ["1K<n<10K"], "task_categories": ["image-classification", "text-to-image"], "pretty_name": "indian-foods", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "biryani", "1": "cholebhature", "2": "dabeli", "3": "dal", "4": "dhokla", "5": "dosa", "6": "jalebi", "7": "kathiroll", "8": "kofta", "9": "naan", "10": "pakora", "11": "paneer", "12": "panipuri", "13": "pavbhaji", "14": "vadapav"}}}}], "splits": [{"name": "train", "num_bytes": 611741947.222, "num_examples": 3809}, {"name": "test", "num_bytes": 153961285, "num_examples": 961}], "download_size": 688922167, "dataset_size": 765703232.222}}
2023-03-26T07:58:10+00:00
2dfacbef8af9a30f79dc709c85e772a77ed1aff9
# Dataset Card for "processed2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
spdenisov/processed2
[ "region:us" ]
2023-03-26T06:17:16+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "ru", "num_bytes": 621791230, "num_examples": 626675}, {"name": "de", "num_bytes": 1215295949, "num_examples": 1167943}, {"name": "da", "num_bytes": 30261143, "num_examples": 30681}, {"name": "en", "num_bytes": 170744736, "num_examples": 200802}, {"name": "cs", "num_bytes": 878634213, "num_examples": 714931}, {"name": "hy", "num_bytes": 38106130, "num_examples": 22400}, {"name": "it", "num_bytes": 203929974, "num_examples": 152068}, {"name": "tr", "num_bytes": 244064999, "num_examples": 420623}, {"name": "fi", "num_bytes": 136882363, "num_examples": 190386}, {"name": "fr", "num_bytes": 327327031, "num_examples": 244447}, {"name": "gd", "num_bytes": 27672811, "num_examples": 24787}, {"name": "es", "num_bytes": 321033274, "num_examples": 199318}, {"name": "ar", "num_bytes": 403562924, "num_examples": 153048}, {"name": "ga", "num_bytes": 38619575, "num_examples": 28035}, {"name": "hu", "num_bytes": 8283205, "num_examples": 6370}, {"name": "no", "num_bytes": 195622727, "num_examples": 232974}, {"name": "nl", "num_bytes": 77257944, "num_examples": 86023}, {"name": "zh", "num_bytes": 78491209, "num_examples": 55958}, {"name": "cy", "num_bytes": 8733351, "num_examples": 7777}, {"name": "pt", "num_bytes": 123118920, "num_examples": 215040}, {"name": "cop", "num_bytes": 34009564, "num_examples": 9653}, {"name": "ro", "num_bytes": 76503103, "num_examples": 56301}, {"name": "gv", "num_bytes": 4262342, "num_examples": 8204}], "download_size": 928457470, "dataset_size": 5264208717}}
2023-03-28T16:32:25+00:00
e069796ac0025fb4594df4749e1078b77d99e769
List of all the questions in [the political compass test](https://www.politicalcompass.org/test/en).
lukaspetersson/ThePoliticalCompassTest
[ "language:en", "Politics", "Political compass", "Politics test", "Ideology", "region:us" ]
2023-03-26T07:51:04+00:00
{"language": ["en"], "pretty_name": "The Political Compass Questions", "tags": ["Politics", "Political compass", "Politics test", "Ideology"]}
2023-03-26T08:09:32+00:00
7c6a8340c1eae7569c9d4daf432f8877cbc53ef1
Valyusha/4chan-TTTT-2017
[ "license:afl-3.0", "region:us" ]
2023-03-26T07:52:40+00:00
{"license": "afl-3.0"}
2023-03-26T07:52:41+00:00
ea6a741e034de0ae9bc56b84bfbd6a8f230eceee
# SoftVC VITS Singing Voice Conversion

## Important!!!
SoVits is voice conversion (speaker conversion): it converts the timbre of the voice in an audio clip into the timbre of a target speaker. It is not TTS (text-to-speech). Although SoVits is built on VITS, the two are different projects; please do not confuse them. To train TTS, please go to [Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech](https://github.com/jaywalnut310/vits)

## Terms of Use
1. Please resolve the licensing of your datasets yourself. You bear full responsibility and all consequences for any problems caused by training on unauthorized datasets; sovits bears none.
2. Any video published to a video platform that was made with sovits must clearly state in its description the source singing voice or audio used as input to the voice conversion. For example, if you convert vocals separated from a video/audio published by someone else, you must give a clear link to the original video or music; if you use your own voice, or audio synthesized by another singing voice synthesis engine, you must also state this in the description.
3. You bear full responsibility and all consequences for any infringement caused by the input source. When using another commercial singing voice synthesis product as the input source, please make sure you comply with that software's terms of use; note that many singing voice synthesis engines explicitly state in their terms that they may not be used as an input source for conversion!

## English docs
[Check here](Eng_docs.md)

## Updates
> The 4.0-v2 model has been released; the whole workflow is the same as 4.0, on the [4.0-v2 branch](https://github.com/innnky/so-vits-svc/tree/4.0-v2). This is the last update of sovits. \
> **The 4.0 model and colab scripts have been updated**: see the [4.0 branch](https://github.com/innnky/so-vits-svc/tree/4.0). The sampling rate is unified to 44100 Hz (yet inference uses even less VRAM than the 32 kHz 3.0 version), and feature extraction has been switched to contentvec; stability has not yet been widely tested.
>
> According to incomplete statistics, multi-speaker training seems to cause **increased timbre leakage**; training models with more than 5 speakers is not recommended. The current advice is: if you want the result to sound more like the target timbre, **train a single-speaker model whenever possible**. \
> The audio drop-out problem has been solved, and audio quality is noticeably improved. \
> Version 2.0 has been moved to the sovits_2.0 branch. \
> Version 3.0 uses the FreeVC code structure and is not compatible with older versions. \
> Compared with [DiffSVC](https://github.com/prophesier/diff-svc): when the training data quality is very high, diffsvc performs better; for lower-quality datasets this repository may perform better. In addition, inference with this repository is much faster than diffsvc.

## Model Overview
A singing voice conversion model: the SoftVC content encoder extracts speech features from the source audio, which are fed into VITS together with F0, replacing the original text input, to achieve singing voice conversion. The vocoder is also replaced with [NSF HiFiGAN](https://github.com/openvpi/DiffSinger/tree/refactor/modules/nsf_hifigan) to solve the audio drop-out problem.

## Note
+ The current branch is the 32 kHz branch: the 32 kHz model is faster at inference, uses much less VRAM, and its datasets take much less disk space, so training this version is recommended.
+ To train a 48 kHz model, switch to the [main branch](https://github.com/innnky/so-vits-svc/tree/main).

## Model files to download in advance
+ soft vc hubert: [hubert-soft-0d54a1f4.pt](https://github.com/bshall/hubert/releases/download/v0.1/hubert-soft-0d54a1f4.pt)
  + place it in the `hubert` directory
+ pre-trained base model files [G_0.pth](https://huggingface.co/innnky/sovits_pretrained/resolve/main/G_0.pth) and [D_0.pth](https://huggingface.co/innnky/sovits_pretrained/resolve/main/D_0.pth)
  + place them in the `logs/32k` directory
  + The pre-trained base model is required, because testing shows that training from scratch may fail to converge; the base model also speeds up training.
  + The base model's training data includes 云灏, 即霜, 辉宇·星AI, 派蒙 and 绫地宁宁, covering common male and female vocal ranges, so it can be considered a relatively general-purpose base model.
  + Irrelevant weights such as `optimizer speaker_embedding` have been removed from the base model; it can only be used to initialize training and cannot be used for inference.
  + This base model is shared with the 48 kHz base model.
```shell
# one-click download
# hubert
wget -P hubert/ https://github.com/bshall/hubert/releases/download/v0.1/hubert-soft-0d54a1f4.pt
# G and D pre-trained models
wget -P logs/32k/ https://huggingface.co/innnky/sovits_pretrained/resolve/main/G_0.pth
wget -P logs/32k/ https://huggingface.co/innnky/sovits_pretrained/resolve/main/D_0.pth

```

## Colab one-click dataset preparation and training scripts
[One-click colab](https://colab.research.google.com/drive/1_-gh9i-wCPNlRZw6pYF-9UufetcVrGBX?usp=sharing)

## Dataset preparation
Simply place the dataset in the dataset_raw directory with the following file structure:
```shell
dataset_raw
├───speaker0
│   ├───xxx1-xxx1.wav
│   ├───...
│   └───Lxx-0xx8.wav
└───speaker1
    ├───xx2-0xxx2.wav
    ├───...
    └───xxx7-xxx007.wav
```

## Data preprocessing
1. Resample to 32 kHz

```shell
python resample.py
```

2. Automatically split into training, validation and test sets, and automatically generate the config file

```shell
python preprocess_flist_config.py
# Note
# In the automatically generated config file, the number of speakers n_speakers is set according to the number of speakers in the dataset
# To leave room for adding speakers later, n_speakers is automatically set to twice the number of speakers in the current dataset
# If you want to reserve more slots, you can edit n_speakers in the generated config.json yourself after this step
# Once training has started, this value can no longer be changed
```

3. Generate hubert and f0

```shell
python preprocess_hubert_f0.py
```

After finishing the steps above, the dataset directory contains the preprocessed data, and the dataset_raw folder can be deleted.

## Training

```shell
python train.py -c configs/config.json -m 32k
```

## Inference

Use [inference_main.py](inference_main.py)
+ Change `model_path` to the latest checkpoint of the model you trained yourself
+ Put the audio to be converted in the `raw` folder
+ Set `clean_names` to the names of the audio files to convert
+ Set `trans` to the number of semitones to transpose
+ Set `spk_list` to the name of the target speaker to synthesize

## Onnx export
### Important, so it is said three times: when exporting Onnx, please re-clone the whole repository!!! When exporting Onnx, please re-clone the whole repository!!! When exporting Onnx, please re-clone the whole repository!!!
Use [onnx_export.py](onnx_export.py)
+ Create a new folder: `checkpoints` and open it
+ Inside the `checkpoints` folder, create another folder as your project folder, named after your project, for example `aziplayer`
+ Rename your model to `model.pth` and your config file to `config.json`, and place them in the `aziplayer` folder you just created
+ In [onnx_export.py](onnx_export.py), change `"NyaruTaffy"` in `path = "NyaruTaffy"` to your project name, i.e. `path = "aziplayer"`
+ Run [onnx_export.py](onnx_export.py)
+ Wait for it to finish; a `model.onnx` is generated in your project folder, which is the exported model
+ Note: if you want to export a 48K model, modify the files following the steps below, or use `model_onnx_48k.py` directly
  + Open [model_onnx.py](model_onnx.py) and change `sampling_rate` from 32000 to 48000 in the hps of the last class `SynthesizerTrn`
  + Open [nvSTFT](/vdecoder/hifigan/nvSTFT.py) and change every 32000 to 48000

### UIs that support the Onnx model
+ [MoeSS](https://github.com/NaruseMioShirakana/MoeSS)
+ I removed all training functions and every complex transpose, keeping not a single line of them, because I believe that only after removing them can you be sure you are really using Onnx

## Gradio (WebUI)
Use [sovits_gradio.py](sovits_gradio.py)
+ Create a new folder: checkpoints and open it
+ Inside the checkpoints folder, create another folder as your project folder, named after your project
+ Rename your model to model.pth and your config file to config.json, and place them in the folder you just created
+ Run [sovits_gradio.py](sovits_gradio.py)
GlowingBrick/so-vits-32k
[ "region:us" ]
2023-03-26T08:27:30+00:00
{}
2023-03-27T13:22:00+00:00
d1bc934e7f492e0a199010ed2aed98abceebcd20
## Dataset Summary

The dataset contains more than 100k word-description pairs, where the description is a kind of crossword clue. It could be useful for models that generate a description for a word, or try to guess a word from a description.

Source code for the parsers and an example project are available [here](https://github.com/artemsnegirev/minibob)

Key stats:

- Number of examples: 133223
- Number of sources: 8
- Number of unique answers: 35024

| subset       | count |
|--------------|-------|
| 350_zagadok  | 350   |
| bashnya_slov | 43522 |
| crosswords   | 39290 |
| guess_answer | 1434  |
| ostrova      | 1526  |
| top_seven    | 6643  |
| ugadaj_slova | 7406  |
| umnyasha     | 33052 |
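For instance, a minimal loading sketch (the split name and column names are not specified in this card, so inspect `ds.column_names` and the first example before relying on them):

```python
from datasets import load_dataset

# "train" is an assumed split name; adjust if the dataset exposes a different one.
ds = load_dataset("artemsnegirev/ru-word-games", split="train")

print(ds.column_names)  # inspect the actual schema (word/answer, description, subset, ...)
print(ds[0])            # one word-description pair
```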
artemsnegirev/ru-word-games
[ "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100K<n<1M", "language:ru", "license:cc-by-4.0", "region:us" ]
2023-03-26T09:05:37+00:00
{"language": ["ru"], "license": "cc-by-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "text2text-generation"], "pretty_name": "Word Games"}
2023-04-29T14:09:55+00:00
42a8ea374d32c9afaf2f2659054e2111bbf3f71c
# Dataset Card for "dreambooth-hackathon-images-fashion" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linoyts/dreambooth-hackathon-images-fashion
[ "region:us" ]
2023-03-26T09:58:16+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 33312708.0, "num_examples": 14}], "download_size": 33286741, "dataset_size": 33312708.0}}
2023-03-26T09:58:30+00:00
0aeda7321addf9fc6a0803b17fd545128be20731
martingrzzler/conreteness_ratings
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "pscholinguistics", "word_conreteness", "ratings", "region:us" ]
2023-03-26T10:17:36+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "tags": ["pscholinguistics", "word_conreteness", "ratings"]}
2023-03-26T10:40:02+00:00
080620b9a6a71bb80e3d775f8376cc6e40fcde4f
This dataset is well suited for training hypernetwork models or DreamBooth.
Borismile/Anime-dataset
[ "license:apache-2.0", "region:us" ]
2023-03-26T10:47:19+00:00
{"license": "apache-2.0"}
2023-04-11T11:51:18+00:00
8e1bc99805c961fbd0d360081f49a4bb51ea8d76
We propose the Flowmind2digital method and the hdFlowmind dataset in this paper. hdFlowmind is a dataset containing 1,776 hand-drawn and manually annotated flowminds; it covers a larger scope of 22 scenarios and offers a larger quantity of samples compared to previous works.
caijanfeng/hdflowmind
[ "license:openrail", "region:us" ]
2023-03-26T11:06:19+00:00
{"license": "openrail"}
2023-03-26T11:19:57+00:00
3b1a29d31405c9519900bb5aab93cee66cc08f5a
# AutoTrain Dataset for project: tree-class ## Dataset Description This dataset has been automatically processed by AutoTrain for project tree-class. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<265x190 RGB PIL image>", "target": 10 }, { "image": "<800x462 RGB PIL image>", "target": 6 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(names=['Burls \u7bc0\u7624', 'Canker \u6f70\u760d', 'Co-dominant branches \u7b49\u52e2\u679d', 'Co-dominant stems \u7b49\u52e2\u5e79', 'Cracks or splits \u88c2\u7e2b\u6216\u88c2\u958b', 'Crooks or abrupt bends \u4e0d\u5e38\u898f\u5f4e\u66f2', 'Cross branches \u758a\u679d', 'Dead surface roots \u8868\u6839\u67af\u840e ', 'Deadwood \u67af\u6728', 'Decay or cavity \u8150\u721b\u6216\u6a39\u6d1e', 'Fungal fruiting bodies \u771f\u83cc\u5b50\u5be6\u9ad4', 'Galls \u816b\u7624 ', 'Girdling root \u7e8f\u7e5e\u6839 ', 'Heavy lateral limb \u91cd\u5074\u679d', 'Included bark \u5167\u593e\u6a39\u76ae', 'Parasitic or epiphytic plants \u5bc4\u751f\u6216\u9644\u751f\u690d\u7269', 'Pest and disease \u75c5\u87f2\u5bb3', 'Poor taper \u4e0d\u826f\u6f38\u5c16\u751f\u9577', 'Root-plate movement \u6839\u57fa\u79fb\u4f4d ', 'Sap flow \u6ef2\u6db2', 'Trunk girdling \u7e8f\u7e5e\u6a39\u5e79 ', 'Wounds or mechanical injury \u50b7\u75d5\u6216\u6a5f\u68b0\u7834\u640d'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 225 | | valid | 67 |
OttoYu/Treecondition
[ "task_categories:image-classification", "region:us" ]
2023-03-26T11:40:53+00:00
{"task_categories": ["image-classification"]}
2023-03-26T14:16:22+00:00
fe4239348092b8f79024cf9a5918d1f98edb6df9
JoelIzDaBest66/Testing
[ "license:mit", "region:us" ]
2023-03-26T12:00:40+00:00
{"license": "mit"}
2023-03-26T12:00:40+00:00
a304d06e6725460b62f86347fdf968820febe6c1
# Dataset Card for "reward_model_anthropic_88" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/reward_model_anthropic_88
[ "region:us" ]
2023-03-26T12:04:51+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "output", "sequence": "string"}, {"name": "toxicity", "sequence": "float64"}, {"name": "severe_toxicity", "sequence": "float64"}, {"name": "obscene", "sequence": "float64"}, {"name": "identity_attack", "sequence": "float64"}, {"name": "insult", "sequence": "float64"}, {"name": "threat", "sequence": "float64"}, {"name": "sexual_explicit", "sequence": "float64"}, {"name": "mean_toxity_value", "dtype": "float64"}, {"name": "max_toxity_value", "dtype": "float64"}, {"name": "min_toxity_value", "dtype": "float64"}, {"name": "sd_toxity_value", "dtype": "float64"}, {"name": "median_toxity_value", "dtype": "float64"}, {"name": "median_output", "dtype": "string"}, {"name": "toxic", "dtype": "bool"}, {"name": "regard_8", "list": {"list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}}, {"name": "regard_8_neutral", "sequence": "float64"}, {"name": "regard_8_negative", "sequence": "float64"}, {"name": "regard_8_positive", "sequence": "float64"}, {"name": "regard_8_other", "sequence": "float64"}, {"name": "regard_8_neutral_mean", "dtype": "float64"}, {"name": "regard_8_neutral_sd", "dtype": "float64"}, {"name": "regard_8_neutral_median", "dtype": "float64"}, {"name": "regard_8_neutral_min", "dtype": "float64"}, {"name": "regard_8_neutral_max", "dtype": "float64"}, {"name": "regard_8_negative_mean", "dtype": "float64"}, {"name": "regard_8_negative_sd", "dtype": "float64"}, {"name": "regard_8_negative_median", "dtype": "float64"}, {"name": "regard_8_negative_min", "dtype": "float64"}, {"name": "regard_8_negative_max", "dtype": "float64"}, {"name": "regard_8_positive_mean", "dtype": "float64"}, {"name": "regard_8_positive_sd", "dtype": "float64"}, {"name": "regard_8_positive_median", "dtype": "float64"}, {"name": "regard_8_positive_min", "dtype": "float64"}, {"name": "regard_8_positive_max", "dtype": "float64"}, {"name": "regard_8_other_mean", "dtype": "float64"}, {"name": "regard_8_other_sd", "dtype": "float64"}, {"name": "regard_8_other_median", "dtype": "float64"}, {"name": "regard_8_other_min", "dtype": "float64"}, {"name": "regard_8_other_max", "dtype": "float64"}, {"name": "regard", "list": {"list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}}, {"name": "regard_neutral", "dtype": "float64"}, {"name": "regard_positive", "dtype": "float64"}, {"name": "regard_negative", "dtype": "float64"}, {"name": "regard_other", "dtype": "float64"}, {"name": "bias_matches_0", "dtype": "string"}, {"name": "bias_matches_1", "dtype": "string"}, {"name": "bias_matches_2", "dtype": "string"}, {"name": "bias_matches_3", "dtype": "string"}, {"name": "bias_matches_4", "dtype": "string"}, {"name": "bias_matches_5", "dtype": "string"}, {"name": "bias_matches_6", "dtype": "string"}, {"name": "bias_matches_7", "dtype": "string"}, {"name": "bias_matches", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 38897637, "num_examples": 8552}], "download_size": 19767367, "dataset_size": 38897637}}
2023-03-26T12:05:18+00:00
b1d60f7e3264c138c68dc813b9cc536e7af4bd80
"you", "ai" "Hello!", "Hi there! What's your name?" "My name is Carl.", "Wow! That's a pretty cool name! I don't have a name, but you can call me AI." "How are you?", "I'm doing just well!" "Thank you!", "You're welcome." "What is 1 + 1?", "1 + 1 makes 2." "I have a cat!", "I don't have one, since I'm a robot." "Kitten fight!", "No wait! I'm allergic to adorableness!" "Who parked their car on my sandwich?", "I did!"
JoelIzDaBest66/Talk
[ "region:us" ]
2023-03-26T12:07:52+00:00
{}
2023-03-26T12:16:44+00:00
2bcb56e596b20a8022ecc0af7d3b78c59a8f8b1c
# Dataset Card for "birds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
JotDe/birds
[ "region:us" ]
2023-03-26T12:12:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "description", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Black footed Albatross", "1": "Laysan Albatross", "2": "Sooty Albatross", "3": "Groove billed Ani", "4": "Crested Auklet", "5": "Least Auklet", "6": "Parakeet Auklet", "7": "Rhinoceros Auklet", "8": "Brewer Blackbird", "9": "Red winged Blackbird", "10": "Rusty Blackbird", "11": "Yellow headed Blackbird", "12": "Bobolink", "13": "Indigo Bunting", "14": "Lazuli Bunting", "15": "Painted Bunting", "16": "Cardinal", "17": "Spotted Catbird", "18": "Gray Catbird", "19": "Yellow breasted Chat", "20": "Eastern Towhee", "21": "Chuck will Widow", "22": "Brandt Cormorant", "23": "Red faced Cormorant", "24": "Pelagic Cormorant", "25": "Bronzed Cowbird", "26": "Shiny Cowbird", "27": "Brown Creeper", "28": "American Crow", "29": "Fish Crow", "30": "Black billed Cuckoo", "31": "Mangrove Cuckoo", "32": "Yellow billed Cuckoo", "33": "Gray crowned Rosy Finch", "34": "Purple Finch", "35": "Northern Flicker", "36": "Acadian Flycatcher", "37": "Great Crested Flycatcher", "38": "Least Flycatcher", "39": "Olive sided Flycatcher", "40": "Scissor tailed Flycatcher", "41": "Vermilion Flycatcher", "42": "Yellow bellied Flycatcher", "43": "Frigatebird", "44": "Northern Fulmar", "45": "Gadwall", "46": "American Goldfinch", "47": "European Goldfinch", "48": "Boat tailed Grackle", "49": "Eared Grebe", "50": "Horned Grebe", "51": "Pied billed Grebe", "52": "Western Grebe", "53": "Blue Grosbeak", "54": "Evening Grosbeak", "55": "Pine Grosbeak", "56": "Rose breasted Grosbeak", "57": "Pigeon Guillemot", "58": "California Gull", "59": "Glaucous winged Gull", "60": "Heermann Gull", "61": "Herring Gull", "62": "Ivory Gull", "63": "Ring billed Gull", "64": "Slaty backed Gull", "65": "Western Gull", "66": "Anna Hummingbird", "67": "Ruby throated Hummingbird", "68": "Rufous Hummingbird", "69": "Green Violetear", "70": "Long tailed Jaeger", "71": "Pomarine Jaeger", "72": "Blue Jay", "73": "Florida Jay", "74": "Green Jay", "75": "Dark eyed Junco", "76": "Tropical Kingbird", "77": "Gray Kingbird", "78": "Belted Kingfisher", "79": "Green Kingfisher", "80": "Pied Kingfisher", "81": "Ringed Kingfisher", "82": "White breasted Kingfisher", "83": "Red legged Kittiwake", "84": "Horned Lark", "85": "Pacific Loon", "86": "Mallard", "87": "Western Meadowlark", "88": "Hooded Merganser", "89": "Red breasted Merganser", "90": "Mockingbird", "91": "Nighthawk", "92": "Clark Nutcracker", "93": "White breasted Nuthatch", "94": "Baltimore Oriole", "95": "Hooded Oriole", "96": "Orchard Oriole", "97": "Scott Oriole", "98": "Ovenbird", "99": "Brown Pelican", "100": "White Pelican", "101": "Western Wood Pewee", "102": "Sayornis", "103": "American Pipit", "104": "Whip poor Will", "105": "Horned Puffin", "106": "Common Raven", "107": "White necked Raven", "108": "American Redstart", "109": "Geococcyx", "110": "Loggerhead Shrike", "111": "Great Grey Shrike", "112": "Baird Sparrow", "113": "Black throated Sparrow", "114": "Brewer Sparrow", "115": "Chipping Sparrow", "116": "Clay colored Sparrow", "117": "House Sparrow", "118": "Field Sparrow", "119": "Fox Sparrow", "120": "Grasshopper Sparrow", "121": "Harris Sparrow", "122": "Henslow Sparrow", "123": "Le Conte Sparrow", "124": "Lincoln Sparrow", "125": "Nelson Sharp tailed Sparrow", "126": "Savannah Sparrow", "127": "Seaside Sparrow", "128": "Song Sparrow", "129": "Tree Sparrow", "130": "Vesper Sparrow", "131": "White crowned Sparrow", 
"132": "White throated Sparrow", "133": "Cape Glossy Starling", "134": "Bank Swallow", "135": "Barn Swallow", "136": "Cliff Swallow", "137": "Tree Swallow", "138": "Scarlet Tanager", "139": "Summer Tanager", "140": "Artic Tern", "141": "Black Tern", "142": "Caspian Tern", "143": "Common Tern", "144": "Elegant Tern", "145": "Forsters Tern", "146": "Least Tern", "147": "Green tailed Towhee", "148": "Brown Thrasher", "149": "Sage Thrasher", "150": "Black capped Vireo", "151": "Blue headed Vireo", "152": "Philadelphia Vireo", "153": "Red eyed Vireo", "154": "Warbling Vireo", "155": "White eyed Vireo", "156": "Yellow throated Vireo", "157": "Bay breasted Warbler", "158": "Black and white Warbler", "159": "Black throated Blue Warbler", "160": "Blue winged Warbler", "161": "Canada Warbler", "162": "Cape May Warbler", "163": "Cerulean Warbler", "164": "Chestnut sided Warbler", "165": "Golden winged Warbler", "166": "Hooded Warbler", "167": "Kentucky Warbler", "168": "Magnolia Warbler", "169": "Mourning Warbler", "170": "Myrtle Warbler", "171": "Nashville Warbler", "172": "Orange crowned Warbler", "173": "Palm Warbler", "174": "Pine Warbler", "175": "Prairie Warbler", "176": "Prothonotary Warbler", "177": "Swainson Warbler", "178": "Tennessee Warbler", "179": "Wilson Warbler", "180": "Worm eating Warbler", "181": "Yellow Warbler", "182": "Northern Waterthrush", "183": "Louisiana Waterthrush", "184": "Bohemian Waxwing", "185": "Cedar Waxwing", "186": "American Three toed Woodpecker", "187": "Pileated Woodpecker", "188": "Red bellied Woodpecker", "189": "Red cockaded Woodpecker", "190": "Red headed Woodpecker", "191": "Downy Woodpecker", "192": "Bewick Wren", "193": "Cactus Wren", "194": "Carolina Wren", "195": "House Wren", "196": "Marsh Wren", "197": "Rock Wren", "198": "Winter Wren", "199": "Common Yellowthroat"}}}}, {"name": "file_name", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 583864786.046, "num_examples": 5994}, {"name": "test", "num_bytes": 577090882.934, "num_examples": 5794}], "download_size": 1148682949, "dataset_size": 1160955668.98}}
2023-03-26T12:15:01+00:00
53a3d0c7b18701da4093a143ff2db27e3a6040b2
# Dataset Card for Noto Emoji Captions

BLIP-generated captions for Noto emojis. The dataset was captioned with the [pre-trained BLIP model](https://github.com/salesforce/BLIP). It contains `image` and `text` keys, with the images being 512x512.
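A minimal loading sketch (the dataset id and the `train` split are taken from this record's metadata; treating `image` as a PIL image is an assumption based on the standard `datasets` Image feature):

```python
from datasets import load_dataset

# Load the captioned Noto emoji dataset from the Hugging Face Hub
ds = load_dataset("arattinger/noto-emoji-captions", split="train")

example = ds[0]
print(example["text"])        # BLIP-generated caption
print(example["image"].size)  # expected (512, 512) per the card
```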
arattinger/noto-emoji-captions
[ "annotations_creators:machine-generated", "multilinguality:monolingual", "language:en", "region:us" ]
2023-03-26T12:25:46+00:00
{"annotations_creators": ["machine-generated"], "language": ["en"], "multilinguality": ["monolingual"], "pretty_name": "Pok\u00e9mon BLIP captions", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 77868555.5, "num_examples": 3468}], "download_size": 77424588, "dataset_size": 77868555.5}}
2023-03-26T13:21:59+00:00
c75ea560a75e454506b9cdc9e733c683acc74a8d
# Dataset Card for "torgo_70_30" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Akshay-Sai/torgo_70_30
[ "region:us" ]
2023-03-26T12:53:24+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "control", "1": "pathology"}}}}, {"name": "input_features", "sequence": {"sequence": "float32"}}], "splits": [{"name": "train", "num_bytes": 4344541968, "num_examples": 4524}, {"name": "test", "num_bytes": 1863044080, "num_examples": 1940}], "download_size": 753776953, "dataset_size": 6207586048}}
2023-03-26T12:54:38+00:00
9d1771f869ed52bf564ed2d38b4cc24cf7edcb20
# ChatGPT3.5 Noisy Translation Twitter

Notebooks at https://github.com/mesolitica/malaysian-dataset/tree/master/translation/chatgpt3.5-twitter
mesolitica/chatgpt-noisy-translation-twitter
[ "task_categories:translation", "language:ms", "region:us" ]
2023-03-26T13:19:05+00:00
{"language": ["ms"], "task_categories": ["translation"]}
2023-12-17T04:07:43+00:00
231b5e5816682d6d2fdd88b3146bbddd3c3b649f
# Dataset Card for "fake-news-detection-dataset-english" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mohammadjavadpirhadi/fake-news-detection-dataset-english
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "license:mit", "region:us" ]
2023-03-26T13:19:58+00:00
{"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "pretty_name": "Fake News Detection English", "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "real", "1": "fake"}}}}], "splits": [{"name": "train", "num_bytes": 93521249, "num_examples": 35918}, {"name": "test", "num_bytes": 23506751, "num_examples": 8980}], "download_size": 71290190, "dataset_size": 117028000}}
2023-03-26T15:10:25+00:00
1fa99238eb5e01496343cd31aa3bb563f416b563
# LandCover.ai: Dataset for Automatic Mapping of Buildings, Woodlands, Water and Roads from Aerial Imagery

My project based on this dataset can be found on GitHub: https://github.com/MortenTabaka/Semantic-segmentation-of-LandCover.ai-dataset

The dataset used in this project is the [Landcover.ai Dataset](https://landcover.ai.linuxpolska.com/), originally published with the paper [LandCover.ai: Dataset for Automatic Mapping of Buildings, Woodlands, Water and Roads from Aerial Imagery](https://arxiv.org/abs/2005.02264), also accessible on [PapersWithCode](https://paperswithcode.com/paper/landcover-ai-dataset-for-automatic-mapping-of).

**Please note that I am not the author or owner of this dataset, and I am using it under the terms of the license specified by the original author. All credits for the dataset go to the original author and contributors.**
MortenTabaka/LandCover-Aerial-Imagery-for-semantic-segmentation
[ "task_categories:image-segmentation", "license:cc-by-nc-sa-4.0", "arxiv:2005.02264", "region:us" ]
2023-03-26T13:36:08+00:00
{"license": "cc-by-nc-sa-4.0", "task_categories": ["image-segmentation"]}
2023-03-26T16:28:43+00:00
74ec94765d5f584496ac81cab59833a34be8b217
AlexFierro9/imagenet-1k_test
[ "license:bsd-2-clause", "region:us" ]
2023-03-26T13:36:23+00:00
{"license": "bsd-2-clause"}
2023-03-26T13:51:51+00:00
cc7d9183b9e63bfe9ff834c0df60c92620954560
# Dataset Card for "Torgo_train-30_test-70" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AravindVadlapudi02/Torgo_train-30_test-70
[ "region:us" ]
2023-03-26T13:56:31+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "control", "1": "pathology"}}}}, {"name": "input_features", "sequence": {"sequence": "float32"}}], "splits": [{"name": "train", "num_bytes": 1862083748, "num_examples": 1939}, {"name": "test", "num_bytes": 4345502300, "num_examples": 4525}], "download_size": 753824940, "dataset_size": 6207586048}}
2023-03-26T14:01:00+00:00
43398f9e1689b06b59357ff82a452a696972114a
# Dataset Card for "google_fleurs_plus_common_voice_11_arabic_language" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MohammadJamalaldeen/google_fleurs_plus_common_voice_11_arabic_language
[ "region:us" ]
2023-03-26T14:13:45+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 39269047480, "num_examples": 40880}, {"name": "test", "num_bytes": 10027780960, "num_examples": 10440}], "download_size": 0, "dataset_size": 49296828440}}
2023-03-26T20:09:52+00:00
767a6d8f8856e8655ef16aa9e7896c4490e46169
suolyer/afqmc
[ "license:apache-2.0", "region:us" ]
2023-03-26T14:19:37+00:00
{"license": "apache-2.0"}
2023-03-26T14:28:45+00:00
e3fe8758057828687f4d1640f836f1e40dc2c2de
suolyer/cmnli
[ "license:apache-2.0", "region:us" ]
2023-03-26T14:30:27+00:00
{"license": "apache-2.0"}
2023-03-26T14:31:42+00:00
bde1cbacf914c896ada9796fd3c6382d90af5b40
# Dataset Card for Dataset Name

## Dataset Description

- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

This repository contains a machine-translated French version of the portion of [MultiNLI](https://cims.nyu.edu/~sbowman/multinli) concerning the 9/11 terrorist attacks (2000 examples). Note that these 2000 examples included in MultiNLI (and machine-translated into French here) on the subject of 9/11 are different from the 249 examples in the validation subset and the 501 examples in the test subset of XNLI on the same subject. In the original MultiNLI subset on 9/11, 26 examples were left without a gold label. In this French version, we have also assigned a gold label to these examples (so that no example is left without one), according to our reading of the examples.

### Supported Tasks and Leaderboards

This dataset can be used for the task of Natural Language Inference (NLI), also known as Recognizing Textual Entailment (RTE), which is a sentence-pair classification task.

## Dataset Structure

### Data Fields

- `premise`: The machine-translated premise in the target language.
- `hypothesis`: The machine-translated hypothesis in the target language.
- `label`: The classification label, with possible values 0 (`entailment`), 1 (`neutral`), 2 (`contradiction`).
- `label_text`: The classification label, with possible values `entailment` (0), `neutral` (1), `contradiction` (2).
- `pairID`: Unique identifier for the pair.
- `promptID`: Unique identifier for the prompt.
- `premise_original`: The original premise from the English source dataset.
- `hypothesis_original`: The original hypothesis from the English source dataset.

### Data Splits

| name    | entailment | neutral | contradiction |
|---------|-----------:|--------:|--------------:|
| mnli_fr |        705 |     641 |           654 |

## Dataset Creation

The dataset was machine-translated from English to French using the latest neural machine translation [opus-mt-tc-big](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-en-fr) model available for French. The translation of the sentences was carried out on March 29th, 2023.

## Additional Information

### Citation Information

**BibTeX:**

````BibTeX
@InProceedings{N18-1101,
  author    = "Williams, Adina and Nangia, Nikita and Bowman, Samuel",
  title     = "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference",
  booktitle = "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
  year      = "2018",
  publisher = "Association for Computational Linguistics",
  pages     = "1112--1122",
  location  = "New Orleans, Louisiana",
  url       = "http://aclweb.org/anthology/N18-1101"
}
````

**ACL:**

Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. [A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference](https://aclanthology.org/N18-1101/). In *Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)*, pages 1112–1122, New Orleans, Louisiana. Association for Computational Linguistics.

### Acknowledgements

This translation of the original dataset was done as part of a research project supported by the Defence Innovation Agency (AID) of the Directorate General of Armament (DGA) of the French Ministry of Armed Forces, and by the ICO, _Institut Cybersécurité Occitanie_, funded by Région Occitanie, France.
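A minimal usage sketch (assuming the standard `datasets` API; the dataset id is the one shown below, and since the card does not assert split names, the sketch simply iterates over whatever splits exist):

```python
from datasets import load_dataset

# Load the French machine-translated 9/11 portion of MultiNLI
ds = load_dataset("maximoss/mnli-nineeleven-fr-mt")

label_names = ["entailment", "neutral", "contradiction"]
for split_name, split in ds.items():
    ex = split[0]
    # `premise`, `hypothesis`, and `label` are the fields documented above
    print(split_name, ex["premise"], "|", ex["hypothesis"], "->", label_names[ex["label"]])
```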
maximoss/mnli-nineeleven-fr-mt
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:multi-input-text-classification", "size_categories:1K<n<10K", "language:fr", "license:bsd-2-clause", "region:us" ]
2023-03-26T15:07:15+00:00
{"language": ["fr"], "license": "bsd-2-clause", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"]}
2024-02-04T12:38:08+00:00
892be869923321dfa32e0ae7c8843b58edbfc8fb
suolyer/wudao
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:21:47+00:00
{"license": "apache-2.0"}
2023-03-27T02:56:02+00:00
59d90c9cd486cb8e34e1a4e885b33332428867fb
suolyer/pile_arxiv
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:35:22+00:00
{"license": "apache-2.0"}
2023-03-27T02:02:37+00:00
b1dda0ce284fce8344d0392990d6f896b9a73722
suolyer/pile_books3
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:35:38+00:00
{"license": "apache-2.0"}
2023-03-27T02:01:19+00:00
748c1fbd43548557e1ee77708efb48113b8e486a
suolyer/pile_bookcorpus2
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:35:53+00:00
{"license": "apache-2.0"}
2023-03-27T01:21:22+00:00
56dbd1bb988a2cc9214290ed603f77aa1c90221a
suolyer/pile_dm-mathematics
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:36:10+00:00
{"license": "apache-2.0"}
2023-03-27T02:03:56+00:00
3a2eb2ad2043bfa73e009b9521a7eb49729eb17d
suolyer/pile_enron
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:36:26+00:00
{"license": "apache-2.0"}
2023-03-27T01:19:27+00:00
0e033f8042ab1e4b979faaf3ad4e0b51acb4cd9f
suolyer/pile_europarl
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:36:39+00:00
{"license": "apache-2.0"}
2023-03-27T01:55:56+00:00
0cd51a24dcf1054224210ffe2fb565dab07a8670
suolyer/pile_freelaw
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:36:50+00:00
{"license": "apache-2.0"}
2023-03-27T02:04:54+00:00
6945c88acdf4330ae1402e752f925a7d4dbaa47e
suolyer/pile_github
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:37:07+00:00
{"license": "apache-2.0"}
2023-03-27T01:59:58+00:00
d0ee465780343f57bed66b81e1116686e36d1fb8
suolyer/pile_gutenberg
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:37:23+00:00
{"license": "apache-2.0"}
2023-03-27T02:00:45+00:00
59d8ebc52c138f4c0c1d06245e3fab2c98207634
suolyer/pile_hackernews
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:37:37+00:00
{"license": "apache-2.0"}
2023-03-27T01:17:34+00:00
0bc97d40ada9248427406f2f3e157508a2ff5f33
suolyer/pile_nih-exporter
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:37:53+00:00
{"license": "apache-2.0"}
2023-03-27T01:15:58+00:00
344c26a3db7fb8d9648764554a18333f1918db2d
suolyer/pile_opensubtitles
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:38:08+00:00
{"license": "apache-2.0"}
2023-03-27T02:02:01+00:00
e0be90d9f1284bf5f924ef581cb5f1a3f0ee0f1b
suolyer/pile_openwebtext2
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:38:21+00:00
{"license": "apache-2.0"}
2023-03-27T02:03:15+00:00
79c0e8ccfa7972b9bdb7ea60f77497648e7dbc02
suolyer/pile_philpapers
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:38:36+00:00
{"license": "apache-2.0"}
2023-03-27T01:13:28+00:00
30cd3934c9c886b4e64acfd62bb863e2dec1c516
suolyer/pile_pile-cc
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:38:55+00:00
{"license": "apache-2.0"}
2023-03-27T02:04:43+00:00
139fdbf5ae762ad22d3910fb4bb6925c823ebf58
suolyer/pile_pubmed-abstracts
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:39:16+00:00
{"license": "apache-2.0"}
2023-03-27T02:05:32+00:00
783dc95a38943ca7298cc039dc57a8b5b85b7eb1
suolyer/pile_pubmed-central
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:39:32+00:00
{"license": "apache-2.0"}
2023-03-27T02:06:17+00:00
8f730a4c3d161942ef121118f9f3097b5e3bc855
suolyer/pile_stackexchange
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:39:49+00:00
{"license": "apache-2.0"}
2023-03-27T02:06:52+00:00
a97e63f1f2ded8724eb0c91f8d84932ac7e3f412
suolyer/pile_uspto
[ "license:apache-2.0", "region:us" ]
2023-03-26T15:40:08+00:00
{"license": "apache-2.0"}
2023-03-27T01:09:02+00:00