sha
stringlengths 40
40
| text
stringlengths 0
13.4M
| id
stringlengths 2
117
| tags
list | created_at
stringlengths 25
25
| metadata
stringlengths 2
31.7M
| last_modified
stringlengths 25
25
|
---|---|---|---|---|---|---|
d8eca65dac57cc7bdbca513ea6e9a6f425812a09
|
Mindshift/ascen
|
[
"license:openrail",
"region:us"
] |
2023-03-09T15:13:23+00:00
|
{"license": "openrail"}
|
2023-03-09T15:14:13+00:00
|
|
ca47b126057611b50cb3bafd73d2ebecb69a9988
|
# Dataset Card for "tib_01"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
gigant/tib_01
|
[
"region:us"
] |
2023-03-09T15:16:16+00:00
|
{"dataset_info": {"features": [{"name": "doi", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "video_url", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "genre", "dtype": "string"}, {"name": "release_year", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "contributors", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "transcript", "dtype": "string"}, {"name": "transcript_segments", "sequence": [{"name": "id", "dtype": "int32"}, {"name": "seek", "dtype": "int32"}, {"name": "start", "dtype": "float32"}, {"name": "end", "dtype": "float32"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "int32"}, {"name": "temperature", "dtype": "float32"}, {"name": "avg_logprob", "dtype": "float32"}, {"name": "compression_ratio", "dtype": "float32"}, {"name": "no_speech_prob", "dtype": "float32"}]}, {"name": "keyframes", "sequence": [{"name": "slide", "dtype": "string"}, {"name": "frames", "sequence": "int32"}, {"name": "timestamp", "sequence": "float32"}]}], "splits": [{"name": "train", "num_bytes": 1074314815.9313533, "num_examples": 9381}], "download_size": 513790688, "dataset_size": 1074314815.9313533}}
|
2023-03-09T15:17:21+00:00
|
4a2a86dc62566bab2affa8975fa4b08a2c97e259
|
Starzilla/aesthetic_and_apples_dataset_stable-diffusion_training
|
[
"license:unknown",
"region:us"
] |
2023-03-09T15:40:18+00:00
|
{"license": "unknown"}
|
2023-03-09T15:41:30+00:00
|
|
c8ae0e1e43e845a9d005451019d632b3c94f7596
|
# Wikipedia and OSCAR Turkish Dataset
👋 Welcome to the "Wikipedia and OSCAR Turkish" Huggingface Repo!
📚 This repo contains a Turkish language dataset generated by merging Wikipedia and OSCAR cleaned Common Crawl. The dataset contains over 13 million examples with a single feature - text.
🔍 This dataset can be useful for natural language processing tasks in Turkish language.
📥 To download the dataset, you can use the Hugging Face Datasets library. Here's some sample code to get started:
from datasets import load_dataset
dataset = load_dataset("musabg/wikipedia-oscar-tr")
🤖 Have fun exploring this dataset and training language models on it!
|
musabg/wikipedia-oscar-tr
|
[
"region:us"
] |
2023-03-09T15:49:57+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 74636783061.0, "num_examples": 13847707}], "download_size": 41512074295, "dataset_size": 74636783061.0}}
|
2023-05-10T07:57:22+00:00
|
ef74fc26378d5f0826978942fc0920e8f1ca2ba5
|
Org logos
|
tzvc/organization-logos
|
[
"task_categories:zero-shot-classification",
"size_categories:1M<n<10M",
"language:en",
"logos",
"region:us"
] |
2023-03-09T16:34:14+00:00
|
{"language": ["en"], "size_categories": ["1M<n<10M"], "task_categories": ["zero-shot-classification"], "tags": ["logos"]}
|
2023-03-26T17:23:10+00:00
|
3442fbcb16524c61acc3305471da00bbb5606784
|
# Dataset Card for "test_ds"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
renyulin/test_ds
|
[
"region:us"
] |
2023-03-09T17:09:07+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "pos_tags", "sequence": {"class_label": {"names": {"0": "\"", "1": "''", "2": "#", "3": "$", "4": "(", "5": ")", "6": ",", "7": ".", "8": ":", "9": "``", "10": "CC", "11": "CD", "12": "DT", "13": "EX", "14": "FW", "15": "IN", "16": "JJ", "17": "JJR", "18": "JJS", "19": "LS", "20": "MD", "21": "NN", "22": "NNP", "23": "NNPS", "24": "NNS", "25": "NN|SYM", "26": "PDT", "27": "POS", "28": "PRP", "29": "PRP$", "30": "RB", "31": "RBR", "32": "RBS", "33": "RP", "34": "SYM", "35": "TO", "36": "UH", "37": "VB", "38": "VBD", "39": "VBG", "40": "VBN", "41": "VBP", "42": "VBZ", "43": "WDT", "44": "WP", "45": "WP$", "46": "WRB"}}}}, {"name": "chunk_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-ADJP", "2": "I-ADJP", "3": "B-ADVP", "4": "I-ADVP", "5": "B-CONJP", "6": "I-CONJP", "7": "B-INTJ", "8": "I-INTJ", "9": "B-LST", "10": "I-LST", "11": "B-NP", "12": "I-NP", "13": "B-PP", "14": "I-PP", "15": "B-PRT", "16": "I-PRT", "17": "B-SBAR", "18": "I-SBAR", "19": "B-UCP", "20": "I-UCP", "21": "B-VP", "22": "I-VP"}}}}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "B-PER", "2": "I-PER", "3": "B-ORG", "4": "I-ORG", "5": "B-LOC", "6": "I-LOC", "7": "B-MISC", "8": "I-MISC"}}}}], "splits": [{"name": "train", "num_bytes": 6931345, "num_examples": 14041}, {"name": "validation", "num_bytes": 1739223, "num_examples": 3250}, {"name": "test", "num_bytes": 1582054, "num_examples": 3453}], "download_size": 1815184, "dataset_size": 10252622}}
|
2023-03-09T17:09:44+00:00
|
0ef9b4c5c692fb11c96df18f304ffe8e43066f0e
|
wantswanda/chinese
|
[
"task_categories:image-classification",
"size_categories:1K<n<10K",
"language:en",
"region:us"
] |
2023-03-09T17:15:17+00:00
|
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "pretty_name": "chinese_characters"}
|
2023-03-09T18:10:05+00:00
|
|
c1d2b8fdf12de242cfc03d863acd5594cb6c4d0e
|
omrinach/chroma_guidelines
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-09T18:38:02+00:00
|
{"license": "apache-2.0"}
|
2023-03-17T14:13:30+00:00
|
|
428ff15c93b475e206a0999fe5a2bad39bc53de6
|
# Dataset Card for "evaluacion_derivaciones_update"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mhaite/evaluacion_derivaciones_update
|
[
"region:us"
] |
2023-03-09T20:26:23+00:00
|
{"dataset_info": {"features": [{"name": "fuente", "dtype": "string"}, {"name": "cid", "dtype": "string"}, {"name": "context", "sequence": "string"}, {"name": "prev_target_text", "dtype": "string"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "entidades", "sequence": "string"}, {"name": "keywords", "sequence": "string"}, {"name": "intent", "dtype": "string"}, {"name": "pod", "dtype": "string"}, {"name": "raw_target_text", "dtype": "string"}, {"name": "raw_input_text", "dtype": "string"}, {"name": "keyutter", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "generated_input_text", "dtype": "string"}, {"name": "generated_keywords", "dtype": "string"}, {"name": "generated_entities", "struct": [{"name": "Amigos", "dtype": "float64"}, {"name": "Autoestima", "dtype": "float64"}, {"name": "Edad", "dtype": "float64"}, {"name": "Familia", "dtype": "float64"}, {"name": "Hobbie", "dtype": "float64"}, {"name": "Medicamento", "dtype": "float64"}, {"name": "Nombre", "dtype": "float64"}, {"name": "Pareja", "dtype": "float64"}, {"name": "Problema", "dtype": "float64"}, {"name": "Profesi\u00f3n", "dtype": "float64"}, {"name": "Relevante", "dtype": "float64"}, {"name": "Trabajo", "dtype": "float64"}, {"name": "Tratamiento", "dtype": "float64"}, {"name": "contexto", "dtype": "float64"}, {"name": "problemas de sue\u00f1o", "dtype": "float64"}, {"name": "problemas t\u00e9cnicos", "dtype": "float64"}, {"name": "sintomas", "dtype": "float64"}, {"name": "soluciones", "dtype": "float64"}]}, {"name": "generated_intents", "struct": [{"name": "None", "dtype": "float64"}, {"name": "adiccion", "dtype": "float64"}, {"name": "alimentario", "dtype": "float64"}, {"name": "animo", "dtype": "float64"}, {"name": "ansiedad", "dtype": "float64"}, {"name": "autoestima", "dtype": "float64"}, {"name": "autopercepcion", "dtype": "float64"}, {"name": "contexto_covid", "dtype": "float64"}, {"name": "depresion", "dtype": "float64"}, 
{"name": "duelo", "dtype": "float64"}, {"name": "enfermedades_de_salud_mental", "dtype": "float64"}, {"name": "estres", "dtype": "float64"}, {"name": "estres_academico", "dtype": "float64"}, {"name": "estres_economico", "dtype": "float64"}, {"name": "familiar_salud_fisica", "dtype": "float64"}, {"name": "fobia", "dtype": "float64"}, {"name": "frustracion_", "dtype": "float64"}, {"name": "identidad_genero_y_sexual", "dtype": "float64"}, {"name": "infidelidad", "dtype": "float64"}, {"name": "informacion", "dtype": "float64"}, {"name": "informacion_familiar", "dtype": "float64"}, {"name": "ira", "dtype": "float64"}, {"name": "laboral", "dtype": "float64"}, {"name": "miedo", "dtype": "float64"}, {"name": "problemas_familiares", "dtype": "float64"}, {"name": "random", "dtype": "float64"}, {"name": "relaciones_de_pareja", "dtype": "float64"}, {"name": "relaciones_sociales", "dtype": "float64"}, {"name": "salud_fisica", "dtype": "float64"}, {"name": "separacion", "dtype": "float64"}, {"name": "sexualidad", "dtype": "float64"}, {"name": "sue\u00f1o", "dtype": "float64"}, {"name": "sugerencia", "dtype": "float64"}]}, {"name": "generated_pod", "struct": [{"name": "despedida", "dtype": "float64"}, {"name": "motivo_consulta", "dtype": "float64"}, {"name": "pregunta_sobre_motivo", "dtype": "float64"}, {"name": "presentacion", "dtype": "float64"}, {"name": "solucion", "dtype": "float64"}]}, {"name": "generated_keyutter", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 352754, "num_examples": 266}], "download_size": 285947, "dataset_size": 352754}}
|
2023-03-09T20:26:27+00:00
|
d2cabf7268a66532baf87d469c8d8856c9c0899b
|
KJohnes/CMP_facade_DB_base
|
[
"license:unknown",
"region:us"
] |
2023-03-09T21:37:05+00:00
|
{"license": "unknown"}
|
2023-03-09T21:37:45+00:00
|
|
c8bd9301d321c973cfd7a681e222fab5382beace
|
indo-law: Indonesian law dataset containing section annotation of court decision documents
https://github.com/ir-nlp-csui/indo-law
```
@article{nuranti2022predicting,
title={Predicting the Category and the Length of Punishment in Indonesian Courts Based on Previous Court Decision Documents},
author={Nuranti, Eka Qadri and Yulianti, Evi and Husin, Husna Sarirah},
journal={Computers},
volume={11},
number={6},
pages={88},
year={2022},
publisher={Multidisciplinary Digital Publishing Institute}
}
```
|
bstds/indo_law
|
[
"language:id",
"region:us"
] |
2023-03-09T21:55:23+00:00
|
{"language": ["id"]}
|
2023-03-10T15:29:04+00:00
|
66fde603d92a65c3942250535084971ac7ceffc7
|
# Dataset Card for "turkishSMS-ds"
The dataset was utilized in the following study. It consists of Turkish SMS spam and legitimate data.
Uysal, A. K., Gunal, S., Ergin, S., & Gunal, E. S. (2013). The impact of feature extraction and selection on SMS spam filtering. Elektronika ir Elektrotechnika, 19(5), 67-72.
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
akuysal/turkishSMS-ds
|
[
"task_categories:text-classification",
"task_categories:text-generation",
"language:tr",
"region:us"
] |
2023-03-09T22:02:04+00:00
|
{"language": ["tr"], "task_categories": ["text-classification", "text-generation"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "sms length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 95574.6, "num_examples": 765}, {"name": "validation", "num_bytes": 10619.4, "num_examples": 85}], "download_size": 59882, "dataset_size": 106194}}
|
2023-03-19T10:50:48+00:00
|
ba289af64a852c5a74a102b51ad687c1d7c93f0f
|
# Dataset Card for "beer_reviews_label_drift_neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
EJaalborg2022/beer_reviews_label_drift_neg
|
[
"region:us"
] |
2023-03-09T22:09:39+00:00
|
{"dataset_info": {"features": [{"name": "prediction_ts", "dtype": "float32"}, {"name": "beer_ABV", "dtype": "float32"}, {"name": "beer_name", "dtype": "string"}, {"name": "beer_style", "dtype": "string"}, {"name": "review_appearance", "dtype": "float32"}, {"name": "review_palette", "dtype": "float32"}, {"name": "review_taste", "dtype": "float32"}, {"name": "review_aroma", "dtype": "float32"}, {"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "neutral", "2": "positive"}}}}], "splits": [{"name": "training", "num_bytes": 6908323, "num_examples": 9000}, {"name": "validation", "num_bytes": 970104, "num_examples": 1260}, {"name": "production", "num_bytes": 21305419, "num_examples": 27742}], "download_size": 16954616, "dataset_size": 29183846}}
|
2023-03-10T20:58:48+00:00
|
a411840c4d2f6229c4f14a17fbec0693b4b06b3c
|
# Dataset Card for "spanish_legal_ds_tokenized_and_gropuped"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mrm8488/spanish_legal_ds_tokenized_and_gropuped
|
[
"region:us"
] |
2023-03-09T22:34:09+00:00
|
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 7117386800, "num_examples": 1735948}, {"name": "test", "num_bytes": 703888000, "num_examples": 171680}], "download_size": 3629670012, "dataset_size": 7821274800}}
|
2023-03-09T22:41:00+00:00
|
68212af701c3b41db52f2fb647b9e0d289203156
|
# Dataset Card for "dataforlarge"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
hts98/dataforlarge
|
[
"region:us"
] |
2023-03-09T22:34:10+00:00
|
{"dataset_info": {"features": [{"name": "input_length", "dtype": "int64"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}, {"name": "labels_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3288656680.0, "num_examples": 3420}, {"name": "test", "num_bytes": 823132264.0, "num_examples": 856}], "download_size": 0, "dataset_size": 4111788944.0}}
|
2023-03-10T07:15:47+00:00
|
2dda893b2913818b523480bd067a49af85a7d12f
|
# Dataset Card for "prj_gia_dataset_metaworld_assembly_v2_1112"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
qgallouedec/prj_gia_dataset_metaworld_assembly_v2_1112
|
[
"region:us"
] |
2023-03-09T22:36:43+00:00
|
{"dataset_info": {"features": [{"name": "observations", "sequence": "float32"}, {"name": "actions", "sequence": "float32"}, {"name": "dones", "dtype": "bool"}, {"name": "rewards", "dtype": "float32"}], "splits": [{"name": "train", "num_bytes": 18412500, "num_examples": 100000}], "download_size": 7293153, "dataset_size": 18412500}}
|
2023-03-09T22:36:51+00:00
|
a64784e017411fd7482842b5ebbc841972e4e219
|
A cleaned and tokenized version of the English data from [Mozilla Common Voice 11 dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/tree/main).
Cleaning steps:
* Filtered on samples with >2 upvotes and <1 downvotes]
* Removed non voice audio at start and end through pytorch VAD
Tokenization:
* Audio tokenized through [EnCodec by Meta](https://github.com/facebookresearch/encodec)
* Using 24khz pre-trained model, and target bandwidth of 1.5
* Represented in text as audio_token_0 - audio_token_1023
* Prompts constructed as "text: \<common voice transcript\>\naudio: \<audio tokens\>"
* Prompts tokenized with GPT tokenizer with added vocab of audio tokens.
* Tokenized prompts padded to size 1024 with eos_token.
Each sample has 3 properties: input_ids, attention_mask and labels. input_ids and labels are the tokenized prompts and attention_mask is the attention mask.
|
anforsm/common_voice_11_clean_tokenized
|
[
"task_categories:text-to-speech",
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:en",
"license:cc0-1.0",
"region:us"
] |
2023-03-09T22:40:02+00:00
|
{"language": ["en"], "license": "cc0-1.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-to-speech", "text-generation"], "pretty_name": "Common Voice 11 (en) Cleaned and Tokenized", "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1109542776, "num_examples": 83274}, {"name": "validation", "num_bytes": 17374496, "num_examples": 1304}], "download_size": 197852035, "dataset_size": 1126917272}}
|
2023-03-09T23:53:49+00:00
|
2384668e7afabe725f4d038b6d2062424bfee753
|
trondizzy/XLEnt_v1.2
|
[
"task_categories:translation",
"size_categories:1M<n<10M",
"language:en",
"language:uk",
"license:cc",
"region:us"
] |
2023-03-10T02:08:13+00:00
|
{"language": ["en", "uk"], "license": "cc", "size_categories": ["1M<n<10M"], "task_categories": ["translation"]}
|
2023-03-10T02:10:43+00:00
|
|
4e0cbe1f34e54a4a567fbc9eebd7b16654ef35c0
|
# Dataset Card for "annotated-code-functions-teensy"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
michaelnath/annotated-code-functions-teensy
|
[
"region:us"
] |
2023-03-10T02:55:10+00:00
|
{"dataset_info": {"features": [{"name": "function", "dtype": "string"}, {"name": "repo_name", "dtype": "string"}, {"name": "features", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 454721, "num_examples": 1001}], "download_size": 152815, "dataset_size": 454721}}
|
2023-03-10T02:55:13+00:00
|
b6ec932b960a4f6d38ca7119065415b85f1a6aee
|
jackli888/aodiandata
|
[
"license:openrail",
"region:us"
] |
2023-03-10T02:57:40+00:00
|
{"license": "openrail"}
|
2023-03-10T02:57:40+00:00
|
|
140015ea4ddf1da7217d17e815304a418860b1c5
|
# Dataset Card for "Nolan_whisper_educate"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Nolan1206/Nolan_whisper_educate
|
[
"region:us"
] |
2023-03-10T04:01:31+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 82981.0, "num_examples": 3}, {"name": "train", "num_bytes": 82981.0, "num_examples": 3}], "download_size": 169572, "dataset_size": 165962.0}}
|
2023-03-11T03:54:41+00:00
|
9235b1df2a6549e2780ae9c4a30672cd7e1c8d97
|
gjuggler/bird-data
|
[
"task_categories:image-classification",
"language:en",
"license:creativeml-openrail-m",
"biology",
"region:us"
] |
2023-03-10T04:24:30+00:00
|
{"language": ["en"], "license": "creativeml-openrail-m", "task_categories": ["image-classification"], "dataset_info": {"features": [{"name": "image_file_path", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "Little Blue Heron", "1": "Swainson's Hawk", "2": "Glaucous-winged Gull", "3": "Spotted Towhee", "4": "Neotropic Cormorant", "5": "White-eyed Vireo", "6": "Tundra Swan", "7": "Costa's Hummingbird", "8": "American Crow", "9": "American Tree Sparrow", "10": "Savannah Sparrow", "11": "Verdin", "12": "Wild Turkey", "13": "Rufous Hummingbird", "14": "Blue-gray Gnatcatcher", "15": "Song Sparrow", "16": "Tricolored Heron", "17": "Phainopepla", "18": "Harlequin Duck", "19": "Florida Scrub-Jay", "20": "Black-billed Cuckoo", "21": "Laughing Gull", "22": "Lesser Goldfinch", "23": "Common Tern", "24": "Tree Swallow", "25": "Black-billed Magpie", "26": "Surf Scoter", "27": "Black-and-white Warbler", "28": "Mountain Chickadee", "29": "California Thrasher", "30": "Osprey", "31": "Long-tailed Duck", "32": "Semipalmated Plover", "33": "Reddish Egret", "34": "Black Guillemot", "35": "Ring-billed Gull", "36": "American Avocet", "37": "White-faced Ibis", "38": "Western Tanager", "39": "Black-bellied Plover", "40": "Winter Wren", "41": "Mississippi Kite", "42": "Townsend's Solitaire", "43": "Bonaparte's Gull", "44": "Cassin's Finch", "45": "Yellow-rumped Warbler", "46": "Great Black-backed Gull", "47": "Red-naped Sapsucker", "48": "Swamp Sparrow", "49": "Western Screech-Owl", "50": "Rusty Blackbird", "51": "Northern Saw-whet Owl", "52": "Plumbeous Vireo", "53": "Bushtit", "54": "White-tailed Kite", "55": "White Ibis", "56": "Ovenbird", "57": "Cactus Wren", "58": "Fish Crow", "59": "Greater Scaup", "60": "Pacific Loon", "61": "Red-breasted Sapsucker", "62": "Pied-billed Grebe", "63": "Eastern Towhee", "64": "Acorn Woodpecker", "65": "Mourning Dove", "66": "Red-bellied Woodpecker", "67": "Eastern Wood-Pewee", "68": 
"Northern Mockingbird", "69": "Red Crossbill", "70": "Wood Stork", "71": "Pine Siskin", "72": "Pacific Wren", "73": "Barrow's Goldeneye", "74": "American White Pelican", "75": "Cordilleran Flycatcher", "76": "Eastern Meadowlark", "77": "Yellow-headed Blackbird", "78": "Chipping Sparrow", "79": "Common Grackle", "80": "American Dipper", "81": "Double-crested Cormorant", "82": "Black Phoebe", "83": "Surfbird", "84": "Loggerhead Shrike", "85": "Gila Woodpecker", "86": "Snow Bunting", "87": "Field Sparrow", "88": "Brown Pelican", "89": "Merlin", "90": "Golden Eagle", "91": "Turkey Vulture", "92": "American Wigeon", "93": "Black Turnstone", "94": "Swainson's Thrush", "95": "White-winged Crossbill", "96": "Oak Titmouse", "97": "Least Flycatcher", "98": "Brown-headed Cowbird", "99": "Horned Grebe", "100": "Canvasback", "101": "Yellow-breasted Chat", "102": "Pine Warbler", "103": "Bald Eagle", "104": "Downy Woodpecker", "105": "Black-chinned Hummingbird", "106": "Prothonotary Warbler", "107": "Allen's Hummingbird", "108": "Louisiana Waterthrush", "109": "Gray Catbird", "110": "Western Meadowlark", "111": "House Finch", "112": "Brown Thrasher", "113": "Common Goldeneye", "114": "Hoary Redpoll", "115": "Eastern Kingbird", "116": "Evening Grosbeak", "117": "Mexican Jay", "118": "Mute Swan", "119": "Indigo Bunting", "120": "Brewer's Sparrow", "121": "American Goldfinch", "122": "Red-headed Woodpecker", "123": "Bell's Vireo", "124": "White-winged Scoter", "125": "Sandhill Crane", "126": "Boat-tailed Grackle", "127": "Scissor-tailed Flycatcher", "128": "Great-tailed Grackle", "129": "Common Merganser", "130": "Marsh Wren", "131": "Western Wood-Pewee", "132": "Barred Owl", "133": "Canada Warbler", "134": "Common Nighthawk", "135": "Long-billed Curlew", "136": "Scaled Quail", "137": "Western Sandpiper", "138": "Ruby-crowned Kinglet", "139": "Yellow-bellied Sapsucker", "140": "Killdeer", "141": "Chestnut-backed Chickadee", "142": "Belted Kingfisher", "143": "Blackpoll Warbler", 
"144": "Purple Gallinule", "145": "American Robin", "146": "Solitary Sandpiper", "147": "Chihuahuan Raven", "148": "Yellow-billed Magpie", "149": "Black Tern", "150": "House Sparrow", "151": "Rufous-crowned Sparrow", "152": "Ring-necked Duck", "153": "Warbling Vireo", "154": "Red-shouldered Hawk", "155": "Northern Harrier", "156": "Bay-breasted Warbler", "157": "Great Cormorant", "158": "Rock Pigeon", "159": "Short-billed Dowitcher", "160": "Bronzed Cowbird", "161": "Hooded Warbler", "162": "Black Vulture", "163": "White-breasted Nuthatch", "164": "Lincoln's Sparrow", "165": "Whimbrel", "166": "Varied Thrush", "167": "Dickcissel", "168": "Snowy Owl", "169": "Bank Swallow", "170": "Veery", "171": "Northern Waterthrush", "172": "Bridled Titmouse", "173": "Semipalmated Sandpiper", "174": "Harris's Hawk", "175": "Northern Rough-winged Swallow", "176": "Northern Pintail", "177": "Pelagic Cormorant", "178": "Clark's Grebe", "179": "Broad-winged Hawk", "180": "Swallow-tailed Kite", "181": "Monk Parakeet", "182": "Blackburnian Warbler", "183": "Burrowing Owl", "184": "Cooper's Hawk", "185": "Black Skimmer", "186": "Forster's Tern", "187": "Black-crested Titmouse", "188": "Northwestern Crow", "189": "Wood Thrush", "190": "Blue Jay", "191": "Dunlin", "192": "Yellow-billed Cuckoo", "193": "Black-throated Blue Warbler", "194": "Carolina Chickadee", "195": "Gadwall", "196": "Nuttall's Woodpecker", "197": "Common Gallinule", "198": "Wilson's Snipe", "199": "Greater White-fronted Goose", "200": "Glossy Ibis", "201": "Brant", "202": "Common Ground-Dove", "203": "Band-tailed Pigeon", "204": "Marbled Godwit", "205": "American Redstart", "206": "Clay-colored Sparrow", "207": "American Coot", "208": "American Pipit", "209": "Cackling Goose", "210": "Northern Shrike", "211": "Ruddy Duck", "212": "Red-necked Grebe", "213": "Ross's Goose", "214": "Townsend's Warbler", "215": "American Kestrel", "216": "Royal Tern", "217": "Sharp-shinned Hawk", "218": "Black-legged Kittiwake", "219": 
"Pileated Woodpecker", "220": "Hermit Thrush", "221": "Northern Gannet", "222": "Western Kingbird", "223": "Green-tailed Towhee", "224": "Pine Grosbeak", "225": "Harris's Sparrow", "226": "Bullock's Oriole", "227": "Brown-headed Nuthatch", "228": "Cinnamon Teal", "229": "Eastern Phoebe", "230": "Gambel's Quail", "231": "Nashville Warbler", "232": "Baltimore Oriole", "233": "Eastern Screech-Owl", "234": "American Oystercatcher", "235": "Ash-throated Flycatcher", "236": "Inca Dove", "237": "Anna's Hummingbird", "238": "Black-headed Grosbeak", "239": "Canada Goose", "240": "Ruby-throated Hummingbird", "241": "California Quail", "242": "American Woodcock", "243": "Spotted Sandpiper", "244": "Blue-headed Vireo", "245": "Wood Duck", "246": "Summer Tanager", "247": "Black-capped Chickadee", "248": "Black-tailed Gnatcatcher", "249": "Juniper Titmouse", "250": "Red-throated Loon", "251": "White-throated Sparrow", "252": "Pacific-slope Flycatcher", "253": "Brown-capped Rosy-Finch", "254": "Canyon Wren", "255": "Say's Phoebe", "256": "Blue-winged Warbler", "257": "Abert's Towhee", "258": "Greater Yellowlegs", "259": "Lazuli Bunting", "260": "Red-breasted Nuthatch", "261": "Carolina Wren", "262": "Red-eyed Vireo", "263": "Yellow-throated Vireo", "264": "Least Sandpiper", "265": "Roseate Spoonbill", "266": "Mallard", "267": "Vesper Sparrow", "268": "Common Redpoll", "269": "Heermann's Gull", "270": "Broad-tailed Hummingbird", "271": "Snowy Egret", "272": "Barn Swallow", "273": "Vermilion Flycatcher", "274": "Rose-breasted Grosbeak", "275": "Dark-eyed Junco", "276": "Crested Caracara", "277": "Gray Jay", "278": "Purple Martin", "279": "Magnolia Warbler", "280": "Orange-crowned Warbler", "281": "Broad-billed Hummingbird", "282": "Painted Bunting", "283": "American Black Duck", "284": "Vaux's Swift", "285": "Northern Bobwhite", "286": "Black-throated Gray Warbler", "287": "Red-winged Blackbird", "288": "Black-crowned Night-Heron", "289": "California Gull", "290": "Common Raven", 
"291": "Brewer's Blackbird", "292": "Purple Finch", "293": "Northern Cardinal", "294": "Western Scrub-Jay", "295": "Western Bluebird", "296": "Northern Parula", "297": "Northern Pygmy-Owl", "298": "Palm Warbler", "299": "Violet-green Swallow", "300": "Great Crested Flycatcher", "301": "Rough-legged Hawk", "302": "Tufted Titmouse", "303": "MacGillivray's Warbler", "304": "Lark Bunting", "305": "Orchard Oriole", "306": "Bufflehead", "307": "Black Oystercatcher", "308": "Great Egret", "309": "Redhead", "310": "Blue-winged Teal", "311": "Curve-billed Thrasher", "312": "Scarlet Tanager", "313": "Horned Lark", "314": "Brandt's Cormorant", "315": "White-crowned Sparrow", "316": "House Wren", "317": "Chimney Swift", "318": "Black-necked Stilt", "319": "Yellow Warbler", "320": "Pygmy Nuthatch", "321": "Gray-crowned Rosy-Finch", "322": "Hutton's Vireo", "323": "Hooded Merganser", "324": "Western Grebe", "325": "Canyon Towhee", "326": "Ladder-backed Woodpecker", "327": "Bobolink", "328": "Golden-fronted Woodpecker", "329": "Prairie Falcon", "330": "Black-throated Green Warbler", "331": "Greater Roadrunner", "332": "Cedar Waxwing", "333": "Blue Grosbeak", "334": "Mew Gull", "335": "White-throated Swift", "336": "Red-breasted Merganser", "337": "Cassin's Kingbird", "338": "Green Heron", "339": "Eastern Bluebird", "340": "Eared Grebe", "341": "Fox Sparrow", "342": "Pigeon Guillemot", "343": "Black-bellied Whistling-Duck", "344": "Willet", "345": "Mountain Bluebird", "346": "Clark's Nutcracker", "347": "Northern Flicker", "348": "Bewick's Wren", "349": "Prairie Warbler", "350": "Anhinga", "351": "Ruffed Grouse", "352": "Northern Shoveler", "353": "Common Loon", "354": "Bohemian Waxwing", "355": "Peregrine Falcon", "356": "Snow Goose", "357": "Lesser Scaup", "358": "Golden-crowned Kinglet", "359": "Great Blue Heron", "360": "Ruddy Turnstone", "361": "Western Gull", "362": "Hairy Woodpecker", "363": "Black Scoter", "364": "Common Yellowthroat", "365": "Boreal Chickadee", "366": 
"Cave Swallow", "367": "Mottled Duck", "368": "Yellow-crowned Night-Heron", "369": "Wilson's Phalarope", "370": "Pyrrhuloxia", "371": "Sanderling", "372": "Tennessee Warbler", "373": "Cliff Swallow", "374": "Lark Sparrow", "375": "Ring-necked Pheasant", "376": "Great Horned Owl", "377": "Hermit Warbler", "378": "Yellow-throated Warbler", "379": "Eurasian Collared-Dove", "380": "Mourning Warbler", "381": "Cassin's Vireo", "382": "Cattle Egret", "383": "Cape May Warbler", "384": "European Starling", "385": "Black Rosy-Finch", "386": "White-winged Dove", "387": "Common Eider", "388": "Calliope Hummingbird", "389": "Lesser Yellowlegs", "390": "Golden-crowned Sparrow", "391": "Brown Creeper", "392": "Green-winged Teal", "393": "Red-tailed Hawk", "394": "Hooded Oriole", "395": "Caspian Tern", "396": "Trumpeter Swan", "397": "California Towhee", "398": "Wrentit", "399": "Chestnut-sided Warbler", "400": "Wilson's Warbler", "401": "Barn Owl", "402": "Herring Gull", "403": "Steller's Jay"}}}}], "splits": [{"name": "train", "num_bytes": 9106091, "num_examples": 23912}, {"name": "test", "num_bytes": 9374111, "num_examples": 24615}], "download_size": 9877722099, "dataset_size": 18480202}, "tags": ["biology"]}
|
2023-03-11T14:49:34+00:00
|
|
ce75cb61e786b4f7b53cc906a8fbebcc52066106
|
# Dataset Card for "symptom_text_to_disease_mk4"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
venetis/symptom_text_to_disease_mk4
|
[
"region:us"
] |
2023-03-10T04:30:54+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "emotional pain", "1": "hair falling out", "2": "heart hurts", "3": "infected wound", "4": "foot ache", "5": "shoulder pain", "6": "injury from sports", "7": "skin issue", "8": "stomach ache", "9": "knee pain", "10": "joint pain", "11": "hard to breath", "12": "head ache", "13": "body feels weak", "14": "feeling dizzy", "15": "back pain", "16": "open wound", "17": "internal pain", "18": "blurry vision", "19": "acne", "20": "muscle pain", "21": "neck pain", "22": "cough", "23": "ear ache", "24": "feeling cold"}}}}], "splits": [{"name": "train", "num_bytes": 330494.3762197868, "num_examples": 5328}, {"name": "test", "num_bytes": 41373.82675273983, "num_examples": 667}, {"name": "valid", "num_bytes": 41311.79702747335, "num_examples": 666}], "download_size": 144224, "dataset_size": 413180.0}}
|
2023-03-10T04:30:59+00:00
|
32904a482b3038ebc1a578973146d31e9cd0e434
|
# Dataset Card for "annotated-code-functions-base"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
michaelnath/annotated-code-functions-base
|
[
"region:us"
] |
2023-03-10T05:02:01+00:00
|
{"dataset_info": {"features": [{"name": "function", "dtype": "string"}, {"name": "repo_name", "dtype": "string"}, {"name": "features", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 15982023, "num_examples": 28383}], "download_size": 5257318, "dataset_size": 15982023}}
|
2023-03-10T05:02:04+00:00
|
aab545bc3e5aaa19bd8d81fb506e288b6fcff7de
|
susie-y/game_category_susie
|
[
"task_categories:text-classification",
"size_categories:n<1K",
"language:zh",
"license:bsd",
"region:us"
] |
2023-03-10T05:10:43+00:00
|
{"language": ["zh"], "license": "bsd", "size_categories": ["n<1K"], "task_categories": ["text-classification"]}
|
2023-03-10T05:29:27+00:00
|
|
cfa17b06ade7e42adaf5d27c6552299e35956c60
|
# Dataset Card for "testdataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
leia123/testdataset
|
[
"region:us"
] |
2023-03-10T06:19:55+00:00
|
{"dataset_info": {"features": [{"name": "audio", "sequence": "float32"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 113600198, "num_examples": 534}, {"name": "test", "num_bytes": 113600198, "num_examples": 534}], "download_size": 228223568, "dataset_size": 227200396}}
|
2023-03-11T06:03:14+00:00
|
0b62c443a6e668668d42317060ae7a9bf72ab762
|
# Dataset of chafen
This dataset is crawled from pixiv, with keyword `アークナイツ R-18`, the top 100 popular illustrations.
The grouped images are placed at subfolders, while free images (should be treated as noise) are placed at `free` subfolder.
|
deepghs/chafen_arknights
|
[
"license:mit",
"region:us"
] |
2023-03-10T06:35:31+00:00
|
{"license": "mit"}
|
2023-03-17T04:44:02+00:00
|
a69d1518ff30147166ee530793ffb9be07e2862a
|
# Dataset Card for "litest"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
liersan/litest
|
[
"region:us"
] |
2023-03-10T06:58:27+00:00
|
{"dataset_info": {"features": [{"name": "audio", "sequence": "float32"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 113600198, "num_examples": 534}, {"name": "trian", "num_bytes": 113600198, "num_examples": 534}], "download_size": 114111784, "dataset_size": 227200396}}
|
2023-03-10T06:59:19+00:00
|
45f15f42d595fac102f30b0658449f2a173c2490
|
allesrxcv/sovits
|
[
"region:us"
] |
2023-03-10T07:14:01+00:00
|
{}
|
2023-03-10T08:12:57+00:00
|
|
07429ebe5990440844d1100fb95ecdbf0d4ab47c
|
# Dataset Card for "zhengtest"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
liersan/zhengtest
|
[
"region:us"
] |
2023-03-10T07:21:43+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 82929.0, "num_examples": 3}, {"name": "train", "num_bytes": 82929.0, "num_examples": 3}], "download_size": 84632, "dataset_size": 165858.0}}
|
2023-03-10T07:24:24+00:00
|
27936a632f25e33f5bba49bf5b96a8fca4a1fd3e
|
# Dataset Card for "wurongbo"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
wurongbo/wurongbo
|
[
"region:us"
] |
2023-03-10T07:29:27+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 83036.0, "num_examples": 3}, {"name": "train", "num_bytes": 83036.0, "num_examples": 3}], "download_size": 169770, "dataset_size": 166072.0}}
|
2023-03-10T07:36:11+00:00
|
514d14690f60e45cbdfe4cf2ba9b6b1616fbc444
|
# Dataset Card for "bust"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
keishihattori/bust
|
[
"region:us"
] |
2023-03-10T07:42:53+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2084789317.098, "num_examples": 11039}], "download_size": 2024056454, "dataset_size": 2084789317.098}}
|
2023-03-10T08:42:27+00:00
|
2697681168a1510778f21164060b3a22323de974
|
# Dataset Card for "zhentest"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
liersan/zhentest
|
[
"region:us"
] |
2023-03-10T07:46:17+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 82929.0, "num_examples": 3}, {"name": "train", "num_bytes": 82929.0, "num_examples": 3}], "download_size": 169264, "dataset_size": 165858.0}}
|
2023-03-10T07:46:21+00:00
|
0211ace03e635ba3865e72cc0eaaa0eb234c4934
|
# Dataset Card for WebNLG
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [WebNLG 2023 challenge](https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/)
- **Repository:** [GitHub repository](https://github.com/WebNLG/2023-Challenge)
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** [[email protected]](mailto:[email protected])
### Dataset Summary
The WebNLG 2023 challenge focuses on four under-resourced languages which are severely under-represented in research on
text generation, namely Maltese, Irish, Breton and Welsh. In addition, WebNLG 2023 once again includes Russian, which
was first featured in WebNLG 2020.
The challenge focuses on RDF-to-text generation, similarly to WebNLG 2017 but targeting Breton, Irish, Maltese, Welsh,
and Russian;
The challenge consists in mapping data to text. The training data consists of Data/Text pairs where the data is a set of
triples extracted from DBpedia and the text is a verbalisation of these triples.
For instance, given the 4 RDF triples:
```
<entry category="Company" eid="Id21" shape="(X (X) (X) (X) (X))" shape_type="sibling" size="4">
<modifiedtripleset>
<mtriple>Trane | foundingDate | 1913-01-01</mtriple>
<mtriple>Trane | location | Ireland</mtriple>
<mtriple>Trane | foundationPlace | La_Crosse,_Wisconsin</mtriple>
<mtriple>Trane | numberOfEmployees | 29000</mtriple>
</modifiedtripleset>
</entry>
```
the aim is to generate a text such as (English text):
```
Trane, which was founded on January 1st 1913 in La Crosse, Wisconsin, is based in Ireland. It has 29,000 employees.
```
or (Russian text):
```
Компания "Тране", основанная 1 января 1913 года в Ла-Кроссе в штате Висконсин, находится в Ирландии. В компании работают 29 тысяч человек.
```
As the example illustrates, the task involves specific NLG subtasks such as sentence segmentation
(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),
aggregation (how to avoid repetitions) and surface realisation
(how to build a syntactically correct and natural sounding text).
### Supported Tasks and Leaderboards
The dataset supports a Structured to Text task which requires a model takes a set of RDF (Resource Description Format)
triples from a database (DBpedia) of the form (subject, property, object) as input and write out a natural language
sentence expressing the information contained in the triples.
The dataset is used in the [WebNLG 2023](https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/)
challenge.
Results are evaluated with automatic metrics: [BLEU](https://huggingface.co/metrics/bleu),
[METEOR](https://huggingface.co/metrics/meteor), [ChrF++](https://huggingface.co/metrics/chrf),
[TER](https://huggingface.co/metrics/ter) and [BERTscore](https://huggingface.co/metrics/bertscore).
Additionally, result are assessed according to criteria such as grammaticality/correctness, appropriateness/adequacy,
fluency/naturalness, etc., by native speakers.
### Languages
The dataset comprises Breton (`br`), Welsh (`cy`), Irish (`ga`), Maltese (`mt`) and Russian (`ru`) languages.
## Dataset Structure
### Data Instances
A typical example contains the original RDF triples in the set, a modified version which presented to crowd workers,
and a set of possible verbalizations for this set of triples:
```
{'category': 'Airport',
'size': 1,
'eid': '1',
'original_triple_sets': {'otriple_set': [['Aarhus_Airport | cityServed | "Aarhus, Denmark"@en']]},
'modified_triple_sets': {'mtriple_set': [['Aarhus_Airport | cityServed | "Aarhus, Denmark"']]},
'shape': '(X (X))',
'shape_type': 'NA',
'lex': {'comment': ['good', 'good', '', ''],
'lid': ['Id1', 'Id2', 'Id3', 'Id3'],
'text': ['Aarhus a zo an aro-vezh Aarhus.',
"Aarhus a servijit ar c'hêr Aarhus.",
'The Aarhus is the airport of Aarhus, Denmark.',
'Aarhus Airport serves the city of Aarhus, Denmark.'],
'lang': ['br', 'br', 'en', 'en']}}
```
### Data Fields
The following fields can be found in the instances:
- `category`: the category of the DBpedia entities present in the RDF triples.
- `eid`: an example ID, only unique per split per category.
- `size`: number of RDF triples in the set.
- `shape`: (since v2) Each set of RDF-triples is a tree, which is characterised by its shape and shape type. `shape` is a string representation of the tree with nested parentheses where X is a node (see [Newick tree format](https://en.wikipedia.org/wiki/Newick_format))
- `shape_type`: (since v2) is a type of the tree shape, which can be: `chain` (the object of one triple is the subject of the other); `sibling` (triples with a shared subject); `mixed` (both chain and sibling types present).
- `test_category`: (for `webnlg_challenge_2017` and `v3`) tells whether the set of RDF triples was present in the training set or not. Several splits of the test set are available: with and without references, and for RDF-to-text generation / for semantic parsing.
- `lex`: the lexicalizations, with:
- `text`: the text to be predicted.
- `lid`: a lexicalization ID, unique per example.
- `comment`: the lexicalizations were rated by crowd workers are either `good` or `bad`
- `lang`: (for `release_v3.0_ru`) the language used because original English texts were kept in the Russian version.
### Data Splits
The dataset is split into train and validation:
| language | train | validation |
|----------|------:|-----------:|
| br | 13211 | 1399 |
| cy | 13211 | 1665 |
| ga | 13211 | 1665 |
| mt | 13211 | 1665 |
| ru | 5573 | 790 |
## Dataset Creation
### Curation Rationale
The WebNLG dataset was created to promote the development _(i)_ of RDF verbalisers and _(ii)_ of microplanners able to handle a wide range of linguistic constructions. The dataset aims at covering knowledge in different domains ("categories"). The same properties and entities can appear in several categories.
### Source Data
The data was compiled from raw DBpedia triples. [This paper](https://www.aclweb.org/anthology/C16-1141/) explains how the triples were selected.
#### Initial Data Collection and Normalization
Initial triples extracted from DBpedia were modified in several ways. See [official documentation](https://webnlg-challenge.loria.fr/docs/) for the most frequent changes that have been made. An original tripleset and a modified tripleset usually represent a one-to-one mapping. However, there are cases with many-to-one mappings when several original triplesets are mapped to one modified tripleset.
Entities that served as roots of RDF trees are listed in [this file](https://gitlab.com/shimorina/webnlg-dataset/-/blob/master/supplementary/entities_dict.json).
The English WebNLG 2020 dataset (v3.0) for training comprises data-text pairs for 16 distinct DBpedia categories:
- The 10 seen categories used in the 2017 version: Airport, Astronaut, Building, City, ComicsCharacter, Food, Monument, SportsTeam, University, and WrittenWork.
- The 5 unseen categories of 2017, which are now part of the seen data: Athlete, Artist, CelestialBody, MeanOfTransportation, Politician.
- 1 new category: Company.
The Russian dataset (v3.0) comprises data-text pairs for 9 distinct categories: Airport, Astronaut, Building, CelestialBody, ComicsCharacter, Food, Monument, SportsTeam, and University.
#### Who are the source language producers?
There are no source texts, all textual material was compiled during the annotation process.
### Annotations
#### Annotation process
Annotators were first asked to create sentences that verbalise single triples. In a second round, annotators were asked to combine single-triple sentences together into sentences that cover 2 triples. And so on until 7 triples. Quality checks were performed to ensure the quality of the annotations. See Section 3.3 in [the dataset paper](https://www.aclweb.org/anthology/P17-1017.pdf).
Russian data was translated from English with an MT system and then was post-edited by crowdworkers. See Section 2.2 of [this paper](https://webnlg-challenge.loria.fr/files/2020.webnlg-papers.7.pdf).
#### Who are the annotators?
All references were collected through crowdsourcing platforms (CrowdFlower/Figure 8 and Amazon Mechanical Turk). For Russian, post-editing was done using the Yandex.Toloka crowdsourcing platform.
### Personal and Sensitive Information
Neither the dataset as published or the annotation process involves the collection or sharing of any kind of personal / demographic information.
## Considerations for Using the Data
### Social Impact of Dataset
We do not foresee any negative social impact in particular from this dataset or task.
Positive outlooks: Being able to generate good quality text from RDF data would permit, e.g., making this data more accessible to lay users, enriching existing text with information drawn from knowledge bases such as DBpedia or describing, comparing and relating entities present in these knowledge bases.
### Discussion of Biases
This dataset is created using DBpedia RDF triples which naturally exhibit biases that have been found to exist in Wikipedia such as some forms of, e.g., gender bias.
The choice of [entities](https://gitlab.com/shimorina/webnlg-dataset/-/blob/master/supplementary/entities_dict.json), described by RDF trees, was not controlled. As such, they may contain gender biases; for instance, all the astronauts described by RDF triples are male. Hence, in texts, pronouns _he/him/his_ occur more often. Similarly, entities can be related to the Western culture more often than to other cultures.
### Other Known Limitations
The quality of the crowdsourced references is limited, in particular in terms of fluency/naturalness of the collected texts.
Russian data was machine-translated and then post-edited by crowdworkers, so some examples may still exhibit issues related to bad translations.
## Additional Information
### Dataset Curators
The principle curator of the dataset is Anastasia Shimorina (Université de Lorraine / LORIA, France). Throughout the WebNLG releases, several people contributed to their construction: Claire Gardent (CNRS / LORIA, France), Shashi Narayan (Google, UK), Laura Perez-Beltrachini (University of Edinburgh, UK), Elena Khasanova, and Thiago Castro Ferreira (Federal University of Minas Gerais, Brazil).
The dataset construction was funded by the French National Research Agency (ANR).
### Licensing Information
The dataset uses the `cc-by-nc-sa-4.0` license. The source DBpedia project uses the `cc-by-sa-3.0` and `gfdl-1.1` licenses.
### Citation Information
If you use the WebNLG corpus, cite:
```
@inproceedings{web_nlg,
author = {Claire Gardent and
Anastasia Shimorina and
Shashi Narayan and
Laura Perez{-}Beltrachini},
editor = {Regina Barzilay and
Min{-}Yen Kan},
title = {Creating Training Corpora for {NLG} Micro-Planners},
booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational
Linguistics, {ACL} 2017, Vancouver, Canada, July 30 - August 4, Volume
1: Long Papers},
pages = {179--188},
publisher = {Association for Computational Linguistics},
year = {2017},
url = {https://doi.org/10.18653/v1/P17-1017},
doi = {10.18653/v1/P17-1017}
}
```
### Contributions
Thanks to [@albertvillanova](https://huggingface.co/albertvillanova) for adding this dataset.
|
webnlg/challenge-2023
|
[
"task_categories:tabular-to-text",
"task_ids:rdf-to-text",
"annotations_creators:found",
"language_creators:crowdsourced",
"multilinguality:multilingual",
"size_categories:10K<n<100K",
"source_datasets:extended|other-db_pedia",
"source_datasets:original",
"language:br",
"language:cy",
"language:ga",
"language:mt",
"language:ru",
"license:cc-by-sa-3.0",
"license:cc-by-nc-sa-4.0",
"license:gfdl",
"region:us"
] |
2023-03-10T08:30:03+00:00
|
{"annotations_creators": ["found"], "language_creators": ["crowdsourced"], "language": ["br", "cy", "ga", "mt", "ru"], "license": ["cc-by-sa-3.0", "cc-by-nc-sa-4.0", "gfdl"], "multilinguality": ["multilingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["extended|other-db_pedia", "original"], "task_categories": ["tabular-to-text"], "task_ids": ["rdf-to-text"], "pretty_name": "WebNLG 2023 challenge", "dataset_info": [{"config_name": "br", "features": [{"name": "category", "dtype": "string"}, {"name": "size", "dtype": "int32"}, {"name": "eid", "dtype": "string"}, {"name": "original_triple_sets", "sequence": [{"name": "otriple_set", "sequence": "string"}]}, {"name": "modified_triple_sets", "sequence": [{"name": "mtriple_set", "sequence": "string"}]}, {"name": "shape", "dtype": "string"}, {"name": "shape_type", "dtype": "string"}, {"name": "lex", "sequence": [{"name": "comment", "dtype": "string"}, {"name": "lid", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 14841422, "num_examples": 13211}, {"name": "validation", "num_bytes": 1394620, "num_examples": 1399}], "download_size": 10954332, "dataset_size": 16236042}, {"config_name": "cy", "features": [{"name": "category", "dtype": "string"}, {"name": "size", "dtype": "int32"}, {"name": "eid", "dtype": "string"}, {"name": "original_triple_sets", "sequence": [{"name": "otriple_set", "sequence": "string"}]}, {"name": "modified_triple_sets", "sequence": [{"name": "mtriple_set", "sequence": "string"}]}, {"name": "shape", "dtype": "string"}, {"name": "shape_type", "dtype": "string"}, {"name": "lex", "sequence": [{"name": "comment", "dtype": "string"}, {"name": "lid", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 15070109, "num_examples": 13211}, {"name": "validation", "num_bytes": 1605315, "num_examples": 1665}], "download_size": 
10954332, "dataset_size": 16675424}, {"config_name": "ga", "features": [{"name": "category", "dtype": "string"}, {"name": "size", "dtype": "int32"}, {"name": "eid", "dtype": "string"}, {"name": "original_triple_sets", "sequence": [{"name": "otriple_set", "sequence": "string"}]}, {"name": "modified_triple_sets", "sequence": [{"name": "mtriple_set", "sequence": "string"}]}, {"name": "shape", "dtype": "string"}, {"name": "shape_type", "dtype": "string"}, {"name": "lex", "sequence": [{"name": "comment", "dtype": "string"}, {"name": "lid", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 15219249, "num_examples": 13211}, {"name": "validation", "num_bytes": 1621527, "num_examples": 1665}], "download_size": 10954332, "dataset_size": 16840776}, {"config_name": "mt", "features": [{"name": "category", "dtype": "string"}, {"name": "size", "dtype": "int32"}, {"name": "eid", "dtype": "string"}, {"name": "original_triple_sets", "sequence": [{"name": "otriple_set", "sequence": "string"}]}, {"name": "modified_triple_sets", "sequence": [{"name": "mtriple_set", "sequence": "string"}]}, {"name": "shape", "dtype": "string"}, {"name": "shape_type", "dtype": "string"}, {"name": "lex", "sequence": [{"name": "comment", "dtype": "string"}, {"name": "lid", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 15281045, "num_examples": 13211}, {"name": "validation", "num_bytes": 1611988, "num_examples": 1665}], "download_size": 10954332, "dataset_size": 16893033}, {"config_name": "ru", "features": [{"name": "category", "dtype": "string"}, {"name": "size", "dtype": "int32"}, {"name": "eid", "dtype": "string"}, {"name": "original_triple_sets", "sequence": [{"name": "otriple_set", "sequence": "string"}]}, {"name": "modified_triple_sets", "sequence": [{"name": "mtriple_set", "sequence": "string"}]}, {"name": "shape", 
"dtype": "string"}, {"name": "shape_type", "dtype": "string"}, {"name": "lex", "sequence": [{"name": "comment", "dtype": "string"}, {"name": "lid", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 8145815, "num_examples": 5573}, {"name": "validation", "num_bytes": 1122090, "num_examples": 790}], "download_size": 10954332, "dataset_size": 9267905}]}
|
2023-03-10T11:22:40+00:00
|
a9e43a793c030aec62a9fd3551c71d6ae4ca3f03
|
jacobbieker/project-resilience
|
[
"license:mit",
"doi:10.57967/hf/1648",
"region:us"
] |
2023-03-10T08:42:06+00:00
|
{"license": "mit"}
|
2023-04-09T07:46:21+00:00
|
|
7efc7661162d058f2cea7a915abf02a8036d50fd
|
Silverovo/Diaperfur
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-10T08:50:05+00:00
|
{"license": "apache-2.0"}
|
2023-03-10T09:27:56+00:00
|
|
704856927e6bdbb0d739052675490454202d40d2
|
# Dataset Card for "imdb-genre-prediction"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/imdb-genre-prediction
|
[
"region:us"
] |
2023-03-10T09:39:40+00:00
|
{"dataset_info": {"features": [{"name": "Description", "dtype": "string"}, {"name": "Genre_is_Drama", "dtype": "bool"}, {"name": "label", "dtype": "bool"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 229204, "num_examples": 680}, {"name": "validation", "num_bytes": 39016, "num_examples": 120}, {"name": "test", "num_bytes": 66552, "num_examples": 200}], "download_size": 244006, "dataset_size": 334772}}
|
2023-03-10T09:39:52+00:00
|
8b6cbbc0b75133beebec63d2ff4af9972187de16
|
# Dataset Card for "imdb_genre_prediction"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/imdb_genre_prediction
|
[
"region:us"
] |
2023-03-10T09:41:46+00:00
|
{"dataset_info": {"features": [{"name": "Rank", "dtype": "int64"}, {"name": "Title", "dtype": "string"}, {"name": "Description", "dtype": "string"}, {"name": "Director", "dtype": "string"}, {"name": "Actors", "dtype": "string"}, {"name": "Year", "dtype": "int64"}, {"name": "Runtime (Minutes)", "dtype": "int64"}, {"name": "Rating", "dtype": "float64"}, {"name": "Votes", "dtype": "int64"}, {"name": "Revenue (Millions)", "dtype": "float64"}, {"name": "Metascore", "dtype": "float64"}, {"name": "Genre_is_Drama", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 224399.15, "num_examples": 680}, {"name": "validation", "num_bytes": 39599.85, "num_examples": 120}, {"name": "test", "num_bytes": 65392, "num_examples": 200}], "download_size": 0, "dataset_size": 329391.0}}
|
2023-03-15T10:21:37+00:00
|
c879ff89cfce5d7974795cb50ff721e7904b7d4e
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442337
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T09:58:57+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T09:59:00+00:00
|
4f652b1bac72b72757933c6c020a59724c864fba
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442415
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T10:00:16+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T10:00:18+00:00
|
49bd6156fe7db315aec039e338e8c9ac7b1b3e35
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442421
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T10:00:21+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T10:00:23+00:00
|
e399aaa508f09dc08bf3f3994fd2a07570fddb68
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442469
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T10:01:09+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T10:01:11+00:00
|
c639bc6db9236831c9233abebdb45bcb52fb6de7
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442552
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T10:02:32+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T10:02:35+00:00
|
45fad694360844f8aa6d907ee9cee357556e88d4
|
# GEM Submission
Submission name: BART-base_Original_CACAPO
|
GEM-submissions/Simon1997__bart-base_original_cacapo__1678442649
|
[
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] |
2023-03-10T10:04:09+00:00
|
{"benchmark": "gem", "type": "prediction", "submission_name": "BART-base_Original_CACAPO", "tags": ["evaluation", "benchmark"]}
|
2023-03-10T10:04:11+00:00
|
bd2b2e7263134eaa92b912b9e5c9a9323210d174
|
celikmus/symptom_text_to_disease_01
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-10T10:08:35+00:00
|
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "emotional pain", "1": "hair falling out", "2": "heart hurts", "3": "infected wound", "4": "foot ache", "5": "shoulder pain", "6": "injury from sports", "7": "skin issue", "8": "stomach ache", "9": "knee pain", "10": "joint pain", "11": "hard to breath", "12": "head ache", "13": "body feels weak", "14": "feeling dizzy", "15": "back pain", "16": "open wound", "17": "internal pain", "18": "blurry vision", "19": "acne", "20": "muscle pain", "21": "neck pain", "22": "cough", "23": "ear ache", "24": "feeling cold"}}}}], "splits": [{"name": "train", "num_bytes": 330494.3762197868, "num_examples": 5328}, {"name": "test", "num_bytes": 41373.82675273983, "num_examples": 667}, {"name": "valid", "num_bytes": 41311.79702747335, "num_examples": 666}], "download_size": 145457, "dataset_size": 413180.0}}
|
2023-03-10T10:09:08+00:00
|
|
b97b5aa9062ae8e69cfcfdcc747170a359b42532
|
Falcon2006VN/pascal-code-generation-18mb
|
[
"license:mit",
"region:us"
] |
2023-03-10T10:17:05+00:00
|
{"license": "mit"}
|
2023-03-10T10:17:40+00:00
|
|
3019b760cd849830aec6d0654dbe8a4a03e791d6
|
### Dataset Summary
This dataset contains GPT-3.5 (`text-davinci-003`) generations from MS-MARCO queries.
[Query2doc: Query Expansion with Large Language Models](https://arxiv.org/pdf/2303.07678.pdf) Liang Wang, Nan Yang and Furu Wei
### Data Instances
An example looks as follows.
```
{
"query_id": "1030303",
"query": "who is aziz hashim",
"pseudo_doc": "Aziz Hashim is a renowned entrepreneur, business leader, and one of the most successful restaurant franchise operators in the US. He is the founder of NRD Capital, a private equity firm focused on investments in multi-unit restaurant franchised businesses. Hashim has built a formidable track record of success in the franchise industry, with brands such as Outback Steakhouse and Jamba Juice. His accomplishments and philanthropic initiatives have earned him numerous awards, including the prestigious Ernst and Young Entrepreneur of the Year award."
}
```
### Data Fields
- `query_id`: a `string` feature.
- `query`: a `string` feature.
- `pseudo_doc`: a `string` feature.
### Data Splits
| train | dev | test | trec_dl2019 | trec_dl2020 |
|--------|------:|------:|------:|------:|
| 502939 | 6980 | 6837 | 43 | 54 |
### How to use this dataset
```python
from datasets import load_dataset
dataset = load_dataset('intfloat/query2doc_msmarco')
print(dataset['trec_dl2019'][0])
```
### Reproducing our results
We provide a python script [repro_bm25.py](https://huggingface.co/datasets/intfloat/query2doc_msmarco/blob/main/repro_bm25.py) to reproduce our results with BM25 retrieval.
First install some python dependency packages:
```
pip install pyserini==0.15.0 pytrec_eval datasets tqdm
```
Then download and run the python code:
```
python repro_bm25.py
```
This script utilizes the pre-built Lucene index from [Pyserini](https://github.com/castorini/pyserini/blob/pyserini-0.15.0/docs/prebuilt-indexes.md)
and might yield slightly different results compared to the paper.
### Citation Information
```
@article{wang2023query2doc,
title={Query2doc: Query Expansion with Large Language Models},
author={Wang, Liang and Yang, Nan and Wei, Furu},
journal={arXiv preprint arXiv:2303.07678},
year={2023}
}
```
|
intfloat/query2doc_msmarco
|
[
"size_categories:100K<n<1M",
"language:en",
"license:cc-by-4.0",
"arxiv:2303.07678",
"region:us"
] |
2023-03-10T10:28:59+00:00
|
{"language": ["en"], "license": "cc-by-4.0", "size_categories": ["100K<n<1M"]}
|
2023-03-30T01:44:59+00:00
|
7077c509beedbfb07bf0566ffc6b1a43d7a9da01
|
# Wikitext Document Level
This is a modified version of [https://huggingface.co/datasets/wikitext](https://huggingface.co/datasets/wikitext) that returns Wiki pages instead of Wiki text line-by-line. The original readme is contained below.
# Dataset Card for "wikitext"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [Pointer Sentinel Mixture Models](https://arxiv.org/abs/1609.07843)
- **Point of Contact:** [Stephen Merity](mailto:[email protected])
- **Size of downloaded dataset files:** 373.28 MB
- **Size of the generated dataset:** 1072.25 MB
- **Total amount of disk used:** 1445.53 MB
### Dataset Summary
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.
Compared to the preprocessed version of Penn Treebank (PTB), WikiText-2 is over 2 times larger and WikiText-103 is over
110 times larger. The WikiText dataset also features a far larger vocabulary and retains the original case, punctuation
and numbers - all of which are removed in PTB. As it is composed of full articles, the dataset is well suited for models
that can take advantage of long term dependencies.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### wikitext-103-raw-v1
- **Size of downloaded dataset files:** 183.09 MB
- **Size of the generated dataset:** 523.97 MB
- **Total amount of disk used:** 707.06 MB
An example of 'validation' looks as follows.
```
This example was too long and was cropped:
{
"text": "\" The gold dollar or gold one @-@ dollar piece was a coin struck as a regular issue by the United States Bureau of the Mint from..."
}
```
#### wikitext-103-v1
- **Size of downloaded dataset files:** 181.42 MB
- **Size of the generated dataset:** 522.66 MB
- **Total amount of disk used:** 704.07 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"text": "\" Senjō no Valkyria 3 : <unk> Chronicles ( Japanese : 戦場のヴァルキュリア3 , lit . Valkyria of the Battlefield 3 ) , commonly referred to..."
}
```
#### wikitext-2-raw-v1
- **Size of downloaded dataset files:** 4.50 MB
- **Size of the generated dataset:** 12.91 MB
- **Total amount of disk used:** 17.41 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"text": "\" The Sinclair Scientific Programmable was introduced in 1975 , with the same case as the Sinclair Oxford . It was larger than t..."
}
```
#### wikitext-2-v1
- **Size of downloaded dataset files:** 4.27 MB
- **Size of the generated dataset:** 12.72 MB
- **Total amount of disk used:** 16.99 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"text": "\" Senjō no Valkyria 3 : <unk> Chronicles ( Japanese : 戦場のヴァルキュリア3 , lit . Valkyria of the Battlefield 3 ) , commonly referred to..."
}
```
### Data Fields
The data fields are the same among all splits.
#### wikitext-103-raw-v1
- `text`: a `string` feature.
#### wikitext-103-v1
- `text`: a `string` feature.
#### wikitext-2-raw-v1
- `text`: a `string` feature.
#### wikitext-2-v1
- `text`: a `string` feature.
### Data Splits
| name | train |validation|test|
|-------------------|------:|---------:|---:|
|wikitext-103-raw-v1|1801350| 3760|4358|
|wikitext-103-v1 |1801350| 3760|4358|
|wikitext-2-raw-v1 | 36718| 3760|4358|
|wikitext-2-v1 | 36718| 3760|4358|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
The dataset is available under the [Creative Commons Attribution-ShareAlike License (CC BY-SA 4.0)](https://creativecommons.org/licenses/by-sa/4.0/).
### Citation Information
```
@misc{merity2016pointer,
title={Pointer Sentinel Mixture Models},
author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
year={2016},
eprint={1609.07843},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
### Contributions
Thanks to [@thomwolf](https://github.com/thomwolf), [@lewtun](https://github.com/lewtun), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
|
EleutherAI/wikitext_document_level
|
[
"arxiv:1609.07843",
"region:us"
] |
2023-03-10T10:57:24+00:00
|
{}
|
2023-03-10T11:04:18+00:00
|
9d2eb552c63b6d271356e0633873e28a51c94b99
|
headlessgod/mock_kg
|
[
"license:mit",
"region:us"
] |
2023-03-10T11:31:12+00:00
|
{"license": "mit"}
|
2023-03-10T11:31:57+00:00
|
|
6786c9aa250952a8bf1e51cb016ac4712e301806
|
# Dataset Card for "symptom_text_to_disease_01_strat"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
celikmus/symptom_text_to_disease_01_strat
|
[
"region:us"
] |
2023-03-10T12:01:59+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "emotional pain", "1": "hair falling out", "2": "heart hurts", "3": "infected wound", "4": "foot ache", "5": "shoulder pain", "6": "injury from sports", "7": "skin issue", "8": "stomach ache", "9": "knee pain", "10": "joint pain", "11": "hard to breath", "12": "head ache", "13": "body feels weak", "14": "feeling dizzy", "15": "back pain", "16": "open wound", "17": "internal pain", "18": "blurry vision", "19": "acne", "20": "muscle pain", "21": "neck pain", "22": "cough", "23": "ear ache", "24": "feeling cold"}}}}], "splits": [{"name": "train", "num_bytes": 330494.3762197868, "num_examples": 5328}, {"name": "test", "num_bytes": 41373.82675273983, "num_examples": 667}, {"name": "valid", "num_bytes": 41311.79702747335, "num_examples": 666}], "download_size": 146293, "dataset_size": 413180.0}}
|
2023-03-10T12:04:59+00:00
|
37a5c9bcd29a949b24c24dbc69583204ff656fd7
|
**DISCLAIMER:** None of the data here is my property; rather, this is an extraction and compilation of data from different sources into one common place.
Currently the sources are [spanish CSS10](https://www.kaggle.com/datasets/bryanpark/spanish-single-speaker-speech-dataset) and [this Kaggle Dataset](https://www.kaggle.com/datasets/carlfm01/120h-spanish-speech).
The code used to create combine.zip can be found [here](https://github.com/lopezjuanma96/spanish_voices). It requires you to download the full datasets because the Kaggle API was not working properly, at least for me at the time of creating this: it only allowed me access to the first ~30 files of a dataset when trying to download specific files; the other option was downloading the whole dataset.
The main reason I created this is for my project of adapting [this VITS fine-tuning](https://github.com/Plachtaa/VITS-fast-fine-tuning) script to [spanish](https://github.com/lopezjuanma96/VITS-fast-fine-tuning), therefore the format given to the transcript file and the distribution and amount of audio data, but it can probably be adapted to other formats easily.
|
lopezjm96/spanish_voices
|
[
"region:us"
] |
2023-03-10T13:33:50+00:00
|
{}
|
2023-04-20T01:39:50+00:00
|
e42d330f33d65b7b72dfd408883daf1661f06f18
|
# Cyrillic dataset of 8 Turkic languages spoken in Russia and former USSR
## Dataset Description
The dataset is a part of the [Leipzig Corpora (Wiki) Collection]: https://corpora.uni-leipzig.de/
For the text-classification comparison, Russian has been included to the dataset.
**Paper:**
Dirk Goldhahn, Thomas Eckart and Uwe Quasthoff (2012): Building Large Monolingual Dictionaries at the Leipzig Corpora Collection: From 100 to 200 Languages. In: Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), 2012.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
- ba - Bashkir
- cv - Chuvash
- sah - Sakha
- tt - Tatar
- ky - Kyrgyz
- kk - Kazakh
- tyv - Tuvinian
- krc - Karachay-Balkar
- ru - Russian
### Data Splits
train: Dataset({
features: ['text', 'label'],
num_rows: 72000
})
test: Dataset({
features: ['text', 'label'],
num_rows: 9000
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 9000
})
## Dataset Creation
[Link to the notebook](https://github.com/tatiana-merz/YakuToolkit/blob/main/CyrillicTurkicCorpus.ipynb)
### Curation Rationale
[More Information Needed]
### Source Data
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed]
|
tatiana-merz/cyrillic_turkic_langs
|
[
"task_categories:text-classification",
"size_categories:10K<n<100K",
"language:ba",
"language:cv",
"language:sah",
"language:tt",
"language:ky",
"language:kk",
"language:tyv",
"language:krc",
"language:ru",
"license:cc",
"wiki",
"region:us"
] |
2023-03-10T13:55:33+00:00
|
{"language": ["ba", "cv", "sah", "tt", "ky", "kk", "tyv", "krc", "ru"], "license": "cc", "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "tags": ["wiki"]}
|
2023-03-15T19:41:05+00:00
|
e895019ffb6857521af1756c2d85b64e919af32c
|
# Dataset Card for "LowerCourtInsertionSwissJudgmentPrediction": An implementation of lower court insertion bias analysis for Swiss judgment prediction
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Summary](#dataset-summary)
- [Documents](#documents)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Summary
This dataset contains an implementation of lower-court-insertion for the SwissJudgmentPrediction task.
Note that this dataset only provides a test set and should be used in combination with the [Swiss-Judgment-Prediction](https://huggingface.co/datasets/swiss_judgment_prediction) dataset.
### Documents
Lower-Court-Insertion-Swiss-Judgment-Prediction is a subset of the [Swiss-Judgment-Prediction](https://huggingface.co/datasets/swiss_judgment_prediction) dataset.
The Swiss-Judgment-Prediction dataset is a multilingual, diachronic dataset of 85K Swiss Federal Supreme Court (FSCS) cases annotated with the respective binarized judgment outcome (approval/dismissal), the publication year, the legal area and the canton of origin per case. Lower-Court-Insertion-Swiss-Judgment-Prediction extends this dataset by adding lower court insertion.
### Supported Tasks and Leaderboards
LowerCourtInsertionSwissJudgmentPrediction can be used for performing the LowerCourtInsertion in the legal judgment prediction task.
### Languages
Switzerland has four official languages with 3 languages (German, French and Italian) being represented in more than 1000 Swiss Federal Supreme court decisions. The decisions are written by the judges and clerks in the language of the proceedings.
## Dataset structure
### Data Instances
#### Multilingual use of the dataset
When the dataset is used in a multilingual setting, select the 'all' flag:
```python
from datasets import load_dataset
dataset = load_dataset('rcds/lower_court_insertion_swiss_judgment_prediction', 'all')
```
#### Monolingual use of the dataset
When the dataset is used in a monolingual setting, select the ISO language code for one of the 3 supported languages. For example:
```python
from datasets import load_dataset
dataset = load_dataset('rcds/lower_court_insertion_swiss_judgment_prediction', 'de')
```
### Data Fields
The following data fields are provided for documents (test):
id: (**int**) a unique identifier for the document<br/>
year: (**int**) the publication year<br/>
label: (**str**) the judgment outcome: dismissal or approval<br/>
language: (**str**) one of (de, fr, it)<br/>
region: (**str**) the region of the lower court<br/>
canton: (**str**) the canton of the lower court<br/>
legal area: (**str**) the legal area of the case<br/>
explainability_label: (**str**) the explainability label assigned to the occluded text: (Lower court, Baseline)<br/>
text: (**str**) the facts of the case w/o the occluded text except for cases w/ explainability label "Baseline" (contain entire facts)<br/>
lower_court: (**str**) the inserted lower_court (for Baseline there is no insertion)<br/>
### Data Splits (Including Swiss Judgment Prediction)
Language | Subset | Number of Rows (Test)
|-----|-----|------|
German| de| __378__
French | fr| __414__
Italian | it| __335__
All | all | __1127__
Language | Subset | Number of Documents (Test)
| ----------- | ----------- | ----------- |
German| de | __38__
French | fr | __36__
Italian | it | __34__
All | all | __108__
## Dataset Creation
### Curation Rationale
The dataset was curated by Niklaus et al. (2021) and Nina Baumgartner.
### Source Data
#### Initial Data Collection and Normalization
The original data are available at the Swiss Federal Supreme Court (https://www.bger.ch) in unprocessed formats (HTML). The documents were downloaded from the Entscheidsuche portal (https://entscheidsuche.ch) in HTML.
#### Who are the source language producers?
Switzerland has four official languages with 3 languages (German, French and Italian) being represented in more than 1000 Swiss Federal Supreme court decisions. The decisions are written by the judges and clerks in the language of the proceedings.
### Annotations
#### Annotation process
The decisions have been annotated with the binarized judgment outcome using parsers and regular expressions. In addition, a subset of the test set (27 cases in German, 24 in French and 23 in Italian, spanning the years 2017 to 2020) was annotated by legal experts with the lower court. These lower court annotations were then used to insert each lower court into each case once (in place of the original lower court), allowing an analysis of the changes in the model's performance for each inserted lower court and giving insight into possible bias among them. The legal expert annotations were conducted from April 2020 to August 2020.
#### Who are the annotators?
Joel Niklaus and Adrian Jörg annotated the binarized judgment outcomes. Metadata is published by the Swiss Federal Supreme Court (https://www.bger.ch). The group of legal experts consists of Thomas Lüthi (lawyer), Lynn Grau (law student at master's level) and Angela Stefanelli (law student at master's level).
### Personal and Sensitive Information
The dataset contains publicly available court decisions from the Swiss Federal Supreme Court. Personal or sensitive information has been anonymized by the court before publication according to the following guidelines: https://www.bger.ch/home/juridiction/anonymisierungsregeln.html.
## Additional Information
### Dataset Curators
Niklaus et al. (2021) and Nina Baumgartner
### Licensing Information
We release the data under CC-BY-4.0 which complies with the court licensing (https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf)
© Swiss Federal Supreme Court, 2000-2020
The copyright for the editorial content of this website and the consolidated texts, which is owned by the Swiss Federal Supreme Court, is licensed under the Creative Commons Attribution 4.0 International licence. This means that you can re-use the content provided you acknowledge the source and indicate any changes you have made.
Source: https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf
### Citation Information
```
@misc{baumgartner_nina_occlusion_2019,
title = {From Occlusion to Transparancy – An Occlusion-Based Explainability Approach for Legal Judgment Prediction in Switzerland},
shorttitle = {From Occlusion to Transparancy},
abstract = {Natural Language Processing ({NLP}) models have been used for more and more complex tasks such as Legal Judgment Prediction ({LJP}). A {LJP} model predicts the outcome of a legal case by utilizing its facts. This increasing deployment of Artificial Intelligence ({AI}) in high-stakes domains such as law and the involvement of sensitive data has increased the need for understanding such systems. We propose a multilingual occlusion-based explainability approach for {LJP} in Switzerland and conduct a study on the bias using Lower Court Insertion ({LCI}). We evaluate our results using different explainability metrics introduced in this thesis and by comparing them to high-quality Legal Expert Annotations using Inter Annotator Agreement. Our findings show that the model has a varying understanding of the semantic meaning and context of the facts section, and struggles to distinguish between legally relevant and irrelevant sentences. We also found that the insertion of a different lower court can have an effect on the prediction, but observed no distinct effects based on legal areas, cantons, or regions. However, we did identify a language disparity with Italian performing worse than the other languages due to representation inequality in the training data, which could lead to potential biases in the prediction in multilingual regions of Switzerland. Our results highlight the challenges and limitations of using {NLP} in the judicial field and the importance of addressing concerns about fairness, transparency, and potential bias in the development and use of {NLP} systems. The use of explainable artificial intelligence ({XAI}) techniques, such as occlusion and {LCI}, can help provide insight into the decision-making processes of {NLP} systems and identify areas for improvement. Finally, we identify areas for future research and development in this field in order to address the remaining limitations and challenges.},
author = {{Baumgartner, Nina}},
year = {2022},
langid = {english}
}
```
### Contributions
Thanks to [@ninabaumgartner](https://github.com/ninabaumgartner) for adding this dataset.
|
rcds/lower_court_insertion_swiss_judgment_prediction
|
[
"task_categories:text-classification",
"task_categories:other",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:1K<n<10K",
"source_datasets:extended|swiss_judgment_prediction",
"language:de",
"language:fr",
"language:it",
"language:en",
"license:cc-by-sa-4.0",
"explainability-judgment-prediction",
"region:us"
] |
2023-03-10T14:05:58+00:00
|
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated", "found"], "language": ["de", "fr", "it", "en"], "license": ["cc-by-sa-4.0"], "multilinguality": ["multilingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["extended|swiss_judgment_prediction"], "task_categories": ["text-classification", "other"], "task_ids": [], "pretty_name": "LowerCourtInsertionSwissJudgmentPrediction", "tags": ["explainability-judgment-prediction"]}
|
2023-03-28T07:19:04+00:00
|
9f089a3f3528d1bbd5758d2f6d985541c7c0795c
|
# Dataset Card for "en-cat-tedtalk"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
raquelsmv/en-cat-tedtalk
|
[
"region:us"
] |
2023-03-10T14:18:26+00:00
|
{"dataset_info": {"features": [{"name": "en", "dtype": "string"}, {"name": "cat", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7423333.995939504, "num_examples": 40783}, {"name": "test", "num_bytes": 1855879.0040604954, "num_examples": 10196}], "download_size": 6436438, "dataset_size": 9279213.0}}
|
2023-03-10T14:18:47+00:00
|
1325386eb198e297129bf26964650ebdaa22056f
|
This dataset contains sentences and their scattered counterparts (with no semantic meaning).
|
damilojohn/Text-Descrambling
|
[
"task_categories:text-generation",
"language:en",
"license:apache-2.0",
"region:us"
] |
2023-03-10T14:28:01+00:00
|
{"language": ["en"], "license": "apache-2.0", "task_categories": ["text-generation"]}
|
2023-03-10T14:38:49+00:00
|
dcbaff58902b12fbb2b973e2d3b5b8a5616d9f33
|
# Dataset Card for "TASTESet"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dmargutierrez/TASTESet
|
[
"task_categories:text-classification",
"task_categories:token-classification",
"size_categories:1K<n<10K",
"language:en",
"food",
"recipes",
"name-entity-recognition",
"region:us"
] |
2023-03-10T14:41:03+00:00
|
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification", "token-classification"], "pretty_name": "TASTESet", "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}, {"name": "recipes", "sequence": "string"}, {"name": "prediction_mask", "sequence": "bool"}, {"name": "ner_tags", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 652275.4, "num_examples": 490}, {"name": "test", "num_bytes": 279546.6, "num_examples": 210}], "download_size": 161613, "dataset_size": 931822}, "subtask_categories": ["name-entity-recognition"], "tags": ["food", "recipes", "name-entity-recognition"]}
|
2023-03-17T09:38:31+00:00
|
8207a029e3c5924e6c7920aadc2a9d39990bb914
|
ysharma/dummy123
|
[
"license:mit",
"region:us"
] |
2023-03-10T14:41:31+00:00
|
{"license": "mit"}
|
2023-03-10T14:41:31+00:00
|
|
ac4460eb633dad8d5131cc36915dd1d97b10b8e1
|
NBayer/papers_text_2_summary
|
[
"license:openrail",
"region:us"
] |
2023-03-10T14:42:53+00:00
|
{"license": "openrail"}
|
2023-03-10T14:43:57+00:00
|
|
5e0413cc2c944f973d603faaf1047475e91092d4
|
# Compas
The [Compas dataset](https://github.com/propublica/compas-analysis) for recidivism prediction.
Dataset known to have racial bias issues, check this [Propublica article](https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing) on the topic.
# Configurations and tasks
| **Configuration** | **Task** | Description |
|----------------------------------|---------------------------|-----------------------------------------------------------------|
| encoding | | Encoding dictionary showing original values of encoded features.|
| two-years-recidividity | Binary classification | Will the defendant be a violent recidivist? |
| two-years-recidividity-no-race | Binary classification | As above, but the `race` feature is removed. |
| priors-prediction | Regression | How many prior crimes has the defendant committed? |
| priors-prediction-no-race | Binary classification | As above, but the `race` feature is removed. |
| race | Multiclass classification | What is the `race` of the defendant? |
# Usage
```python
from datasets import load_dataset
dataset = load_dataset("mstz/compas", "two-years-recidividity")["train"]
```
# Features
|**Feature** |**Type** |**Description** |
|---------------------------------------|-----------|---------------------------------------|
|`sex` |`int64` | |
|`age` |`int64` | |
|`race` |`int64` | |
|`number_of_juvenile_fellonies` |`int64` | |
|`decile_score` |`int64` |Criminality score |
|`number_of_juvenile_misdemeanors` |`int64` | |
|`number_of_other_juvenile_offenses` |`int64` | |
|`number_of_prior_offenses` |`int64` | |
|`days_before_screening_arrest` |`int64` | |
|`is_recidivous` |`int64` | |
|`days_in_custody` |`int64` |Days spent in custody |
|`is_violent_recidivous` |`int64` | |
|`violence_decile_score` |`int64` |Criminality score for violent crimes |
|`two_years_recidivous` |`int64` | |
|
mstz/compas
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"compas",
"tabular_classification",
"binary_classification",
"UCI",
"region:us"
] |
2023-03-10T14:43:18+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Compas", "tags": ["compas", "tabular_classification", "binary_classification", "UCI"], "configs": ["encoding", "two-years-recidividity", "two-years-recidividity-no-race", "priors-prediction", "priors-prediction-no-race", "race"]}
|
2023-04-23T12:57:50+00:00
|
37ae980005f907ab2435d3e2c1db2d6a04add603
|
# Dataset Card for "invisible_char_on_small_persian_QA"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
jalalnb/invisible_char_on_small_persian_QA
|
[
"region:us"
] |
2023-03-10T15:07:09+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int32"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "validation", "num_bytes": 335122, "num_examples": 130}, {"name": "train", "num_bytes": 3266777, "num_examples": 1261}], "download_size": 1123718, "dataset_size": 3601899}}
|
2023-03-10T15:07:11+00:00
|
719bf1e8e6be725addf086d48bb86ebdbab0a75e
|
# common_voice_11_0_processed
## Dataset Description
### Dataset Summary
The data files can be found on the illuin gcloud instance at this address: unknown_url
This dataset has been processed from Huggingface Hub dataset ``mozilla-foundation/common_voice_11_0`` and the config ``fr``
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
#### Columns
``path`` ``audio`` ``sentence`` ``taxonomy`` ``taxonomy_large`` ``sentence_processed``
#### Sample
```
{ 'audio': { 'array': array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
-6.10351562e-05, -6.10351562e-05, 0.00000000e+00]),
'path': None,
'sampling_rate': 16000},
'path': '/home/brunohays/.cache/huggingface/datasets/downloads/extracted/e95cbf12249133734218c89e4b09426a7807adcae4a709e56bc54e89ec65bc58/common_voice_fr_27787676.mp3',
'sentence': 'Il collaborera aussi avec Alan Bennett sur différentes comédies et téléfilm dramatiques.',
'sentence_processed': 'il collaborera aussi avec alan bennett sur différentes comédies et téléfilm dramatiques',
'taxonomy': 'common_voice',
'taxonomy_large': 'common_voice'}
```
### Data Splits
|split|number_of_rows|
|:---:|:---:|
|train|487011|
|test|15611|
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
[More Information Needed]
### Annotations
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the dataset
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Property of Illuin Technology
### Contributions
This dataset has been pushed using the repo [illuin-hf-dataset-pusher](https://gitlab.illuin.tech/data-science/ml/libraries/illuin-hf-dataset-pusher)
|
illuin/common_voice_11_0_processed
|
[
"region:us"
] |
2023-03-10T15:12:52+00:00
|
{}
|
2023-03-11T19:09:20+00:00
|
c524a79a2ff623ea926d08f2d9c64ae6b21e62e0
|
# Dataset Card for "AddAny-dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mohammadhossein/addany-dataset
|
[
"region:us"
] |
2023-03-10T15:34:50+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 2043824, "num_examples": 1000}, {"name": "validation", "num_bytes": 271573, "num_examples": 130}], "download_size": 933556, "dataset_size": 2315397}}
|
2023-03-10T15:35:52+00:00
|
4a27a2ffbc09e8550ae3a6dccb2b1292791830cd
|
# Dataset Card for "NER_dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Aleereza/NER_dataset
|
[
"region:us"
] |
2023-03-10T15:45:52+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "O", "1": "B-DAT", "2": "I-DAT", "3": "B-EVE", "4": "I-EVE", "5": "B-LOC", "6": "I-LOC", "7": "B-ORG", "8": "I-ORG", "9": "B-PER", "10": "I-PER"}}}}], "splits": [{"name": "train", "num_bytes": 453592585.5859551, "num_examples": 22521473}, {"name": "test", "num_bytes": 25199589.207022466, "num_examples": 1251193}, {"name": "val", "num_bytes": 25199589.207022466, "num_examples": 1251193}], "download_size": 185174999, "dataset_size": 503991764.00000006}}
|
2023-12-26T07:25:55+00:00
|
a98a603ca48c5b2fd1b4340d3a40ae47e41cb60e
|
RGBD-SOD/test
|
[
"size_categories:10K<n<100K",
"RGBD-SOD",
"region:us"
] |
2023-03-10T15:48:51+00:00
|
{"size_categories": ["10K<n<100K"], "dataset_info": [{"config_name": "v1", "features": [{"name": "depth", "dtype": "image"}, {"name": "rgb", "dtype": "image"}, {"name": "gt", "dtype": "image"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4232411, "num_examples": 10}, {"name": "validation", "num_bytes": 4232411, "num_examples": 10}], "download_size": 2917880, "dataset_size": 8464822}, {"config_name": "v2", "features": [{"name": "depth", "dtype": "image"}, {"name": "rgb", "dtype": "image"}, {"name": "gt", "dtype": "image"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4232411, "num_examples": 10}, {"name": "validation", "num_bytes": 4232411, "num_examples": 10}], "download_size": 2917880, "dataset_size": 8464822}], "tags": ["RGBD-SOD"]}
|
2023-03-12T05:45:57+00:00
|
|
f730729fd0fd880c7715b0a1b08a46b313b84312
|
# ParaDetox: Detoxification with Parallel Data (Russian)
This repository contains information about Russian Paradetox dataset -- the first parallel corpus for the detoxification task -- as well as models for the detoxification of Russian texts.
## ParaDetox Collection Pipeline
The ParaDetox Dataset collection was done via [Yandex.Toloka](https://toloka.yandex.com/) crowdsource platform. The collection was done in three steps:
* *Task 1:* **Generation of Paraphrases**: The first crowdsourcing task asks users to eliminate toxicity in a given sentence while keeping the content.
* *Task 2:* **Content Preservation Check**: We show users the generated paraphrases along with their original variants and ask them to indicate if they have close meanings.
* *Task 3:* **Toxicity Check**: Finally, we check if the workers succeeded in removing toxicity.
All these steps were done to ensure high quality of the data and make the process of collection automated. For more details please refer to the original paper.
## Detoxification model
**New SOTA** for detoxification task -- ruT5 (base) model trained on Russian ParaDetox dataset -- we released online in HuggingFace🤗 repository [here](https://huggingface.co/s-nlp/ruT5-base-detox).
You can also check out our [demo](https://detoxifier.nlp.zhores.net/junction/) and telegram [bot](https://t.me/rudetoxifierbot).
## Citation
```
@article{dementievarusse,
title={RUSSE-2022: Findings of the First Russian Detoxification Shared Task Based on Parallel Corpora},
author={Dementieva, Daryna and Logacheva, Varvara and Nikishina, Irina and Fenogenova, Alena and Dale, David and Krotova, Irina and Semenov, Nikita and Shavrina, Tatiana and Panchenko, Alexander}
}
```
## Contacts
If you find some issue, do not hesitate to add it to [Github Issues](https://github.com/s-nlp/russe_detox_2022).
For any questions, please contact: Daryna Dementieva ([email protected])
|
s-nlp/ru_paradetox
|
[
"task_categories:text-generation",
"language:ru",
"license:openrail++",
"region:us"
] |
2023-03-10T15:52:55+00:00
|
{"language": ["ru"], "license": "openrail++", "task_categories": ["text-generation"]}
|
2023-09-07T12:15:00+00:00
|
40c3b2b88a7e6db389062252822326923d3ca0da
|
# Dataset Card for "segment_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
wieheistdu/segment_test
|
[
"region:us"
] |
2023-03-10T15:54:56+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "annotation", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 428896877.0, "num_examples": 320}, {"name": "annotation", "num_bytes": 106564278.0, "num_examples": 80}], "download_size": 0, "dataset_size": 535461155.0}}
|
2023-03-12T04:48:48+00:00
|
71dc942178a6c50cd7ea69de5f09797ed8389fdb
|
Entity Disambiguation datasets as provided in the [GENRE](https://github.com/facebookresearch/GENRE/blob/main/scripts_genre/download_all_datasets.sh) repo. The dataset can be used to train and evaluate entity disambiguators.
The datasets can be imported easily as follows:
```
from datasets import load_dataset
ds = load_dataset("boragokbakan/entity_disambiguation", "aida")
```
Available dataset names are:
- `blink`
- `ace2004`
- `aida`
- `aquaint`
- `blink`
- `clueweb`
- `msnbc`
- `wiki`
**Note:** As the BLINK training set is very large in size (~10GB), it is advised to set `streaming=True` when calling `load_dataset`.
|
boragokbakan/entity_disambiguation
|
[
"task_categories:question-answering",
"language:en",
"license:afl-3.0",
"entity disambiguation",
"disambiguation",
"ned",
"GENRE",
"BLINK",
"region:us"
] |
2023-03-10T15:56:58+00:00
|
{"language": ["en"], "license": "afl-3.0", "task_categories": ["question-answering"], "pretty_name": "Entity Disambiguation", "tags": ["entity disambiguation", "disambiguation", "ned", "GENRE", "BLINK"]}
|
2023-03-10T19:29:56+00:00
|
f4f4f9569cacd325b978aee301a4db3dd2d623b0
|
# Open Recipes
Open Recipes is an open database of recipe bookmarks
Data taken from https://github.com/fictivekin/openrecipes
Please credit the original creators.
Collected via:
```bash
curl -O https://s3.amazonaws.com/openrecipes/20170107-061401-recipeitems.json.gz
```
## License
The Open Recipes Database is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/3.0/deed.en_US">Creative Commons Attribution 3.0 Unported License</a>.
<a rel="license" href="http://creativecommons.org/licenses/by/3.0/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by/3.0/88x31.png" /></a>
© 2013 Fictive Kin LLC
|
napsternxg/openrecipes-20170107-061401-recipeitems
|
[
"size_categories:100K<n<1M",
"language:en",
"license:cc",
"region:us"
] |
2023-03-10T15:58:22+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["100K<n<1M"], "pretty_name": "open recipes"}
|
2023-03-10T16:10:36+00:00
|
72a65b98a2b5b0ae515b3c939ab24604c85ac7e5
|
# Dataset Card for "tokenized-codeparrot-train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
rohitsuv/tokenized-codeparrot-train
|
[
"region:us"
] |
2023-03-10T16:04:01+00:00
|
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "ratio_char_token", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1485252, "num_examples": 1819}], "download_size": 0, "dataset_size": 1485252}}
|
2023-03-10T18:14:53+00:00
|
b8843cb4ceea0e29b546a0203eb14cb9c19250d2
|
An imitation learning environment for the peg-insert-side-v2 environment, sample for the policy peg-insert-side-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_peg_insert_side_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_peg_insert_side_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_peg_insert_side_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:19:32+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:19:40+00:00
|
bb3be6d11c2d65cdb44adbdc5ac8978a6306dd34
|
An imitation learning environment for the peg-unplug-side-v2 environment, sample for the policy peg-unplug-side-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_peg_unplug_side_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_peg_unplug_side_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_peg_unplug_side_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:21:13+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:21:20+00:00
|
d5739ce0fc56d5dc3048303ee5913698840fae0c
|
An imitation learning environment for the pick-out-of-hole-v2 environment, sample for the policy pick-out-of-hole-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_pick_out_of_hole_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_pick_out_of_hole_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_pick_out_of_hole_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:22:49+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:22:56+00:00
|
f29882b20f63d039006b3013bf9d41515f4dec75
|
An imitation learning environment for the pick-place-v2 environment, sample for the policy pick-place-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_pick_place_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_pick_place_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_pick_place_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:24:31+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:24:38+00:00
|
036f3a582aa35e3b9349fdd37e0ce165d3765899
|
An imitation learning environment for the pick-place-wall-v2 environment, sample for the policy pick-place-wall-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_pick_place_wall_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_pick_place_wall_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_pick_place_wall_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:26:11+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:26:19+00:00
|
8808e1ec383e19c7c124d776adeed2e7c121468c
|
An imitation learning environment for the plate-slide-back-side-v2 environment, sample for the policy plate-slide-back-side-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_plate_slide_back_side_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_plate_slide_back_side_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_plate_slide_back_side_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:27:44+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:27:51+00:00
|
9a848cb55de765d04e7cc0500a0b9c2bbc54b0a9
|
An imitation learning environment for the plate-slide-back-v2 environment, sample for the policy plate-slide-back-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_plate_slide_back_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_plate_slide_back_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_plate_slide_back_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:29:14+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:29:20+00:00
|
4a1baf314299aa0ef9b9e5001efff23522632f48
|
An imitation learning environment for the plate-slide-side-v2 environment, sample for the policy plate-slide-side-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_plate_slide_side_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_plate_slide_side_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_plate_slide_side_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:30:57+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:31:03+00:00
|
bfb8104192f2007179dd80b713dd5e07a6dd3cf5
|
# Dataset Card for "pokemon-name-description-small"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mfumanelli/pokemon-name-description-small
|
[
"region:us"
] |
2023-03-10T17:31:58+00:00
|
{"dataset_info": {"features": [{"name": "description", "dtype": "string"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4323, "num_examples": 73}], "download_size": 3327, "dataset_size": 4323}}
|
2023-03-10T17:35:59+00:00
|
5c31d3cb7c9c969bac5914fe78a2e213ccaf2d4b
|
An imitation learning environment for the plate-slide-v2 environment, sample for the policy plate-slide-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_plate_slide_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_plate_slide_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_plate_slide_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:32:31+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:32:37+00:00
|
94874ec4792140f459a40afe8940805aaf1e28bf
|
An imitation learning environment for the push-back-v2 environment, sample for the policy push-back-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_push_back_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_push_back_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_push_back_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:34:07+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:34:13+00:00
|
ca779422bb63e847ac14406f48f9d8ae301f4dd1
|
An imitation learning environment for the push-v2 environment, sample for the policy push-v2
This environment was created as part of the Generally Intelligent Agents project gia: https://github.com/huggingface/gia
## Load dataset
First, clone it with
```sh
git clone https://huggingface.co/datasets/qgallouedec/prj_gia_dataset_metaworld_push_v2_1111
```
Then, load it with
```python
import numpy as np
dataset = np.load("prj_gia_dataset_metaworld_push_v2_1111/dataset.npy", allow_pickle=True).item()
print(dataset.keys()) # dict_keys(['observations', 'actions', 'dones', 'rewards'])
```
|
qgallouedec/prj_gia_dataset_metaworld_push_v2_1111
|
[
"deep-reinforcement-learning",
"reinforcement-learning",
"gia",
"multi-task",
"multi-modal",
"imitation-learning",
"offline-reinforcement-learning",
"region:us"
] |
2023-03-10T17:35:45+00:00
|
{"library_name": "gia", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "gia", "multi-task", "multi-modal", "imitation-learning", "offline-reinforcement-learning"]}
|
2023-03-10T17:35:51+00:00
|
9d14b3b8b85eb9310c32c474ac108869b4f0f973
|
# Dataset Card for "TinyCalliFont"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
arbml/TinyCalliFont
|
[
"region:us"
] |
2023-03-10T17:56:45+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 489803410.8, "num_examples": 1600}, {"name": "test", "num_bytes": 61689478.0, "num_examples": 200}, {"name": "validation", "num_bytes": 62852027.0, "num_examples": 200}], "download_size": 617998205, "dataset_size": 614344915.8}}
|
2023-03-10T17:57:27+00:00
|
23c6fbc7c5d021e0528deeb0bcfe882c13c0c8bf
|
Maghrebi/Abaza
|
[
"task_categories:text2text-generation",
"size_categories:10K<n<100K",
"language:ab",
"license:cc-by-2.0",
"code",
"region:us"
] |
2023-03-10T18:08:38+00:00
|
{"language": ["ab"], "license": "cc-by-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["text2text-generation"], "pretty_name": "maghrebi/abaza", "tags": ["code"]}
|
2023-03-10T18:13:25+00:00
|
|
8ba030abc7dc8f7e1e4394f92c9384bd2f4f3694
|
**CoT Datasets from Google's FLan Dataset**
|
iamplus/CoT
|
[
"license:apache-2.0",
"region:us"
] |
2023-03-10T18:49:59+00:00
|
{"license": "apache-2.0"}
|
2023-07-10T05:52:45+00:00
|
afc6bcb1734112a3292ae6e58860ff093c4100b6
|
This dataset is curated by [GIZ Data Service Center](https://www.giz.de/expertise/html/63018.html) in the form of Sqaud dataset with features `question`, `answer`, `answer_start`, `context` and `language`.
The source dataset for this comes from [Changing Transport Tracker](https://changing-transport.org/tracker/),
where partners analyze Intended nationally determined contribution (INDC), NDC and Revised/Updated NDC of countries to understand transport related climate mitigation actions.
Specifications
- Dataset size: 3194
- Language: English, Spanish, French
|
alessio-vertemati/ikitracs-qa
|
[
"task_categories:question-answering",
"size_categories:1K<n<10K",
"language:en",
"language:es",
"language:fr",
"license:apache-2.0",
"region:us"
] |
2023-03-10T19:44:14+00:00
|
{"language": ["en", "es", "fr"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"]}
|
2023-03-21T17:02:02+00:00
|
f4ce8a93caf3bff6f8f3ab2460c5b7db3782aac3
|
# Dataset Card for "github-issues_big"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Kasper7953/github-issues_big
|
[
"region:us"
] |
2023-03-10T19:52:11+00:00
|
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int64"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 80208096.0, "num_examples": 20012}, {"name": "val", "num_bytes": 14156256.0, "num_examples": 3532}], "download_size": 26942780, "dataset_size": 94364352.0}}
|
2023-03-10T19:52:23+00:00
|
85a1abd0c7152a20b81da727599786fca479c372
|
# Dataset Card for "github-issues_small"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Kasper7953/github-issues_small
|
[
"region:us"
] |
2023-03-10T19:55:26+00:00
|
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int64"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2837664.0, "num_examples": 708}, {"name": "val", "num_bytes": 505008.0, "num_examples": 126}], "download_size": 1006544, "dataset_size": 3342672.0}}
|
2023-03-10T19:55:30+00:00
|
aedc54f1626195c246e64f67a4d21869daf9bff8
|
# Metaphors and analogies datasets
These datasets contain word pairs and quadruples forming analogies, metaphoric mapping or sematically unacceptable compositions.
- Pair instances are pairs of nouns A and B in a sentence of the form "A is a B".
- Quadruple instances are of the form : < (A,B),(C,D) >
There is an analogy when A is to B what C is to D.
The analogy is also a metaphor when the (A,B) and (C,D) form a metaphoric mapping, usually when they come from different domains.
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
Language : English
### Datasets and paper links
| Name | Size | Labels | Description |
| ---------: | :----- |:-------- | :-------------------------------------------------------------------------- |
| `Cardillo` | 260 *2 | 1, 2 | Pairs of "A is-a B" sentences composed of one metaphoric and one literal sentence. The two sentences of a given pair share the same B term. |
| `Jankowiak`| 120*3 | 0, 1, 2 | Triples of "A is-a/is-like-a B" sentences with exactly one literal, one semantic abnormal and one metaphoric sentence. |
| `Green` | 40*3 | 0, 1, 2 | Triples of proportional analogies, made of 4 terms <A, B, Ci, Di> each. One stem <A,B> is composed with 3 different <Ci,Di> pairs, to form exaclty one near analogy, one far analogy and one non analogic quadruple|
| `Kmiecik` | 720 | 0, 1, 2 | Quadruples <A,B,C,D> labelled as analogy:True/False and far_analogy: True/False|
| `SAT-met` | 160?*5 | 0, 1, 2, 12 | One pair stem <A,B> to combine with 5 different pairs <Ci,Di> and attempt to form proportional analogies. Only one <Ci,Di> forms an analogy with <A,B> We additionally labelled the analogies as **metaphoric**:True/False|
| Name | Paper Citation | Paper link | Dataset link |
| ---------: | :------- | :------------------------------ |-----------------------------------------: |
| `Cardillo` | | [Cardillo (2010)](https://link.springer.com/article/10.3758/s13428-016-0717-1) [Cardillo (2017)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2952404/ ) | |
| `Jankowiak`| | [Jankowiak (2020)]( https://link-springer-com.abc.cardiff.ac.uk/article/10.1007/s10936-020-09695-7) | |
| `Green` | Green, A. E., Kraemer, D. J. M., Fugelsang, J., Gray, J. R., & Dunbar, K. (2010). Connecting Long Distance: Semantic Distance in Analogical Reasoning Modulates Frontopolar Cortex Activity. Cerebral Cortex, 10, 70-76. | [Green (20)]() ||
| `Kmiecik` |Kmiecik, M. J., Brisson, R. J., & Morrison, R. G. (2019). The time course of semantic and relational processing during verbal analogical reasoning. Brain and Cognition, 129, 25-34. | [Kmiecik (20)]() ||
| `SAT-met` | | [Turney (2005)](https://arxiv.org/pdf/cs/0508053.pdf) | |
### Labels :
- Pairs
- **0** : anomaly
- **1** : literal
- **2** : metaphor
- Quadruples :
- **0** : not an analogy
- **1** : an analogy but not a metaphor
- **2** : an analogy and a metaphor or a far analogy
- **12** : maybe a metaphor, somewhere between 1 and 2
### Dataset Splits
- Both lexical and random splits are available for classification experiments.
- Size of the splits :
- **train** : 50 %
- **validation** : 10 %
- **test** : 40 %
- Additionally, for all datasets, the `5-folds` field gives frozen splits for a five-folds cross validation experiment with train/val/test = 70/10/20% of the sets.
# Datasets for Classification
- Task : binary classification or 3-classes classification of pairs or quadruples. Each pair or quadruple is to classify between anomaly, non-metaphoric and metaphoric.
## Pairs
### Datasets names & splits :
| Original set | Dataset name | Split |
|-------------:| :------------ | :------ |
| Cardillo | Pairs\_Cardillo\_random_split | random |
| | Pairs\_Cardillo\_lexical_split | lexical |
| Jankowiac | Pairs\_Jankowiac\_random_split | random |
| | Pairs\_Jankowiac\_lexical_split | lexical |
### Data fields :
| Field | Description | Type |
| -------------:| :------------ | ---- |
| corpus | name of the orgiginal dataset | str |
| id | instance id | str |
| set_id | id of the set containing the given instance in the multiple choice task | int |
| label | 0, 1, 2 | int |
| sentence | A is-a B sentence. | str |
| A | A expression in the sentence | str |
| B | B expression in the sentence | str |
| A\_position | position of A in the sentence | list(int) |
| B\_position | position of B in the sentence | list(int) |
| 5-folds | frozen splits for cross validation | list(str) |
### Examples :
| Name | Example | Label|
| -------: | :------------------------------------- | :-------- |
|Cardillo | | |
|Jankowiac | | |
## Quadruples
### Datasets names & splits
| Original set | dataset name | Split |
| -------: | :------------------------------------- | :-------- |
|Green | Quadruples\_Green\_random_split | random |
| | Quadruples\_Green\_lexical_split | lexical |
|Kmiecik | Quadruples\_Kmiecik\_random_split | random |
| | Quadruples\_Kmiecik\_lexical\_split\_on\_AB | lexical AB |
| | Quadruples\_Kmiecik\_lexical_split\_on\_CD | lexical CD |
|SAT | Quadruples\_SAT\_random\_split | random | random |
| | Quadruples\_SAT\_lexical\_split | lexical | lexical |
### Data fields :
| Field| Description | Type |
| -------------: | :------------ | :------------ |
| corpus | Name of the orgiginal dataset | str |
| id | Element id | str |
| set\_id | Id of the set containing the given instance in the multiple-choice task datasets | int |
| label | 0, 1, 2, 12 | int |
| AB | pair of terms | list(str) |
| CD | pair of terms | list(str) |
| 5-folds | frozen splits for cross validation | list(str) |
### Examples :
| Name | Example | Label|
|-------: | :------------------------------------- | :-------- |
|Green | | |
|Kmiecik | | |
| SAT | | |
# Datasets for multiple choice questions or permutation
- Task : One stem and multiple choices. The stem and its possible combinations are to be combined to form a sentence. The resulting sentence has a label <0,1,2>.
## Pairs
### Datasets names & splits :
| Original set | dataset name | Split |
| -----------|------| :---- |
| Cardillo | Pairs\_Cardillo\_set | test only |
| Jankowiac | Pairs\_Jankowiac\_set |test only |
### Data fields :
| Field | Description | Type |
| -------------: | :------------ | :------------ |
| corpus | Name of the orgiginal dataset | str |
| id | Element id | str |
| pair_ids | Ids of each pair as appearing in the classification datasets. | list(str) |
| labels | 0, 1, 2 | list(int) |
| sentences | List of the sentences composing the set | list(str) |
| A\_positions | Positions of the A's in each sentence | list(list(int)) |
| B\_positions | Positions of the B's in each sentence | list(list(int)) |
| answer | Index of the metaphor | int |
| stem | Term shared between the sentences of the set. | str |
| 5-folds | frozen splits for cross validation | list(str) |
### Examples :
| Name | Stem | Sentences |Label|
|-------: |-------: | :------------------------------------- | :-------- |
|Cardillo | comet | The astronomer's obssession was a comet. | 1 |
| | | The politician's career was a comet. | 2 |
| Jankoviac | harbour | This banana is like a harbour | 0 |
| | | A house is a harbour | 2|
| | | This area is a harbour | 1 |
## Quadruples
### Datasets names & splits :
| Original set | dataset name | Split |
| ----------: | :------| :---- |
| Green | Quadruples\_Green\_set | test only |
| SAT | Quadruples\_SAT\_met_set | test only |
### Data fields :
| Field | Description | Type |
|-------------: | :------------ | :------------ |
| corpus | name of the orgiginal dataset | str |
| id | Element id | str |
| pair\_ids | Ids of the instances as appearing in the clasification datasets | list(str) |
| labels | 0, 1, 2, 12 | list(int) |
| answer | temp | int |
| stem | Word pair to compose with all the other pairs of the set | list(str) |
| pairs | List of word pairs | list(list(str)) |
| 5-folds | Frozen splits for cross validation | list(str) |
### Examples :
| Name | Example | Label|
|-------: | :------------------------------------- | :-------- |
|Green | | |
| | | |
| SAT | | |
|
Joanne/Metaphors_and_Analogies
|
[
"task_categories:question-answering",
"task_categories:token-classification",
"language:en",
"region:us"
] |
2023-03-10T19:57:35+00:00
|
{"language": ["en"], "task_categories": ["question-answering", "token-classification"]}
|
2023-05-30T19:40:56+00:00
|
f8cee6b17e27d9dabbfa4a674c8ed428c62fd72a
|
# MIMIC-IV Clinical Database Demo
The Medical Information Mart for Intensive Care (MIMIC)-IV database is comprised
of deidentified electronic health records for patients admitted to the Beth Israel
Deaconess Medical Center. Access to MIMIC-IV is limited to credentialed users.
Here, we have provided an openly-available demo of MIMIC-IV containing a subset
of 100 patients. The dataset includes similar content to MIMIC-IV, but excludes
free-text clinical notes. The demo may be useful for running workshops and for
assessing whether the MIMIC-IV is appropriate for a study before making an access
request.
For details on the data, see the MIMIC-IV project on PhysioNet:
https://doi.org/10.13026/07hj-2a80
The contents of this project also contain an additional file:
demo_subject_id.csv
This is a CSV file containing the subject_id used to filter MIMIC-IV. Only
these subject_id are available in the demo.
|
physionet/mimic-iv-demo
|
[
"license:odbl",
"region:us"
] |
2023-03-10T20:36:28+00:00
|
{"license": "odbl"}
|
2023-03-10T21:25:43+00:00
|
6d8acbdc8e167a7b530118519787dd43c885e05d
|
# Dataset Card for "entofr"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
raquelsmv/entofr
|
[
"region:us"
] |
2023-03-10T20:47:37+00:00
|
{"dataset_info": {"features": [{"name": "en", "dtype": "string"}, {"name": "fr", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1168821.9757975799, "num_examples": 7999}, {"name": "test", "num_bytes": 292242.02420242026, "num_examples": 2000}], "download_size": 476822, "dataset_size": 1461064.0}}
|
2023-03-10T20:47:56+00:00
|
72cc9090fce5dd5783c82c5df86450c81307c498
|
# Dataset Card for "yolov5-predictions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
nielsr/yolov5-predictions
|
[
"region:us"
] |
2023-03-10T21:18:39+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "int64"}]}, {"name": "prediction", "sequence": {"sequence": "float64"}}], "splits": [{"name": "test", "num_bytes": 5075872429.0, "num_examples": 385}], "download_size": 5075043191, "dataset_size": 5075872429.0}}
|
2023-03-10T21:21:42+00:00
|
be26433ddaceaffb9a2ad4dcccd437c664cde3c4
|
- This Dataset has been downloaded from PubMed
- It has abstracts and titles that are related to acute lymphoblastic leukemia
- the data has been cleaned before uploading
- it could be used for any NLP task, such as Domain Adaptation
|
Gaborandi/Acute_Lymphoblastic_Leukemia_pubmed_abstracts
|
[
"region:us"
] |
2023-03-10T21:38:18+00:00
|
{}
|
2023-03-10T21:41:34+00:00
|
c23b4db05acb4de5083dc34c4721346de1c04a34
|
- This Dataset has been downloaded from PubMed
- It has abstracts and titles that are related to Stroke
- the data has been cleaned before uploading
- it could be used for any NLP task, such as Domain Adaptation
|
Gaborandi/Stroke_pubmed_abstracts
|
[
"region:us"
] |
2023-03-10T22:03:29+00:00
|
{}
|
2023-03-10T22:04:28+00:00
|
062de68479d00474fcda9cf69183a4ab7c4aecd5
|
# Dataset Card for "well-pad-us-group-5-100-100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
neelr11/well-pad-us-group-5-100-100
|
[
"region:us"
] |
2023-03-11T00:05:13+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "index", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25132222.0, "num_examples": 200}], "download_size": 25126330, "dataset_size": 25132222.0}}
|
2023-03-11T00:05:19+00:00
|
a7b5eb8a6c861dda13ebc27fc2115846974646ec
|
# Dataset Card for "glue_text_to_text"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
hanmaegeo/glue_text_to_text
|
[
"region:us"
] |
2023-03-11T00:20:20+00:00
|
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "target", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 12895402, "num_examples": 69711}, {"name": "test", "num_bytes": 68584768, "num_examples": 425205}], "download_size": 42875561, "dataset_size": 81480170}}
|
2023-03-14T23:13:33+00:00
|
c72ca64e4f6f1bc79b455753f242c93d89255a76
|
## Dataset description
This dataset was used to fine-tune this [model](keras-dreambooth/dreambooth_diffusion_minercraft)
## Demo
You can try with this [demo](https://huggingface.co/spaces/keras-dreambooth/minecraft-landscape-demo)
## Intended uses & limitations
A lot of image is belonging to landscape in Minecraft world
|
keras-dreambooth/minercraft
|
[
"size_categories:n<1K",
"license:apache-2.0",
"keras-dreambooth",
"scifi",
"diffusers",
"text-to-image",
"region:us"
] |
2023-03-11T01:42:59+00:00
|
{"license": "apache-2.0", "size_categories": ["n<1K"], "tags": ["keras-dreambooth", "scifi", "diffusers", "text-to-image"]}
|
2023-03-13T21:32:56+00:00
|
acd2e4565ebb091b134254dd44daa417055493c8
|
priyam314/NST
|
[
"size_categories:1K<n<10K",
"language:en",
"region:us"
] |
2023-03-11T02:27:12+00:00
|
{"language": ["en"], "size_categories": ["1K<n<10K"], "pretty_name": "NST-Intermediate"}
|
2023-03-11T03:16:12+00:00
|
|
dd543ef8969212ffb1b056659049127681b780c8
|
# Dataset Card for "movie_captioned-augmented"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
jlbaker361/movie_captioned-augmented
|
[
"region:us"
] |
2023-03-11T02:50:57+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "src", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 138703022.0, "num_examples": 542}], "download_size": 138688197, "dataset_size": 138703022.0}}
|
2023-03-17T08:04:47+00:00
|
696ed44008fdf10efc4726a46d568a6b8817d0bf
|
# Dataset Card for "well-pad-global-500-500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
neelr11/well-pad-global-500-500
|
[
"region:us"
] |
2023-03-11T05:16:19+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "index", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 103120399.0, "num_examples": 1000}], "download_size": 103077439, "dataset_size": 103120399.0}}
|
2023-03-11T05:16:26+00:00
|
28e5704a376e778b212cd310faedf670c56cad85
|
# Dataset Card for "well-pad-global-500-0"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
neelr11/well-pad-global-500-0
|
[
"region:us"
] |
2023-03-11T05:49:06+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "index", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 343119936.674, "num_examples": 4998}], "download_size": 378900610, "dataset_size": 343119936.674}}
|
2023-03-11T05:49:24+00:00
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.