Columns: sha (string, 40 chars), text (string, 0-13.4M chars), id (string, 2-117 chars), tags (list), created_at (string, 25 chars), metadata (string, 2-31.7M chars), last_modified (string, 25 chars)
742643cec01b0defc3d34c6ac1648d3fddeee887
# Dataset Card for "Bloom-560m-trained-on-Wizard-Vicuna-Uncensored-trained-on-Based" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
player1537/Bloom-560m-trained-on-Wizard-Vicuna-Uncensored-trained-on-Based
[ "region:us" ]
2023-06-06T20:31:13+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1512752, "num_examples": 120}], "download_size": 323831, "dataset_size": 1512752}}
2023-06-06T20:53:55+00:00
ed789e61d5e22c5657061102310e18ad03c85d32
spacemanidol/MSMARCO-Doc-v1-Summaries
[ "task_categories:text-classification", "size_categories:1M<n<10M", "language:en", "license:apache-2.0", "search", "information retrieval", "region:us" ]
2023-06-06T20:41:28+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1M<n<10M"], "task_categories": ["text-classification"], "pretty_name": "MSMARCO-Doc-Summaries", "tags": ["search", "information retrieval"]}
2023-07-19T17:19:05+00:00
42d28f3085bfeb546fd7b32482488752b1947f7a
# Dataset Card for "numpy-docs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lgfunderburk/numpy-docs
[ "region:us" ]
2023-06-06T20:46:19+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "summary", "dtype": "string"}, {"name": "social_media_post", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14323, "num_examples": 13}], "download_size": 16015, "dataset_size": 14323}}
2023-06-06T21:59:07+00:00
25667f0b4432bb77dd1a4cb5f6d008d3e945d5db
# Dataset Card for "fd1e16b9" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/fd1e16b9
[ "region:us" ]
2023-06-06T20:51:05+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 180, "num_examples": 10}], "download_size": 1338, "dataset_size": 180}}
2023-06-06T20:51:06+00:00
11adceb9cbc24a6462532316290250b0afe91c4b
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/fiqa-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T20:58:39+00:00
{"language": ["pl"]}
2023-06-07T07:22:36+00:00
8634c07806d5cce3a6138e260e59b81760a0a640
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/msmarco-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:02:28+00:00
{"language": ["pl"]}
2023-06-07T07:22:03+00:00
e4b37068b0c682698f293c98b3736388f14f174a
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/msmarco-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:03:21+00:00
{"language": ["pl"]}
2023-06-07T07:21:32+00:00
63fc86750af76253e8c760fc9e534bbf24d260a2
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/arguana-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:10:02+00:00
{"language": ["pl"]}
2023-06-07T07:18:37+00:00
825bcec6b8cdd62f8f147fd99867c4d8f214dc94
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/arguana-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:13:33+00:00
{"language": ["pl"]}
2023-06-07T07:16:24+00:00
0be27e93455051e531182b85e85e425aba12e9d4
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/quora-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:16:05+00:00
{"language": ["pl"]}
2023-06-07T07:16:00+00:00
357662166eb64dc43b0523fd7b686a5f8c02efc6
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/quora-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:18:44+00:00
{"language": ["pl"]}
2023-06-07T07:13:49+00:00
a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/hotpotqa-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:21:34+00:00
{"language": ["pl"]}
2023-06-07T07:13:33+00:00
2f9e02945453cf5a9391e6e4958b2736f7501164
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/hotpotqa-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:25:23+00:00
{"language": ["pl"]}
2023-06-07T07:13:10+00:00
f0b285d49b464aebfe2da14f6fb11c151b6da3df
# Dataset Card for "gpteacher_reward_modeling_pairwise" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andersonbcdefg/gpteacher_reward_modeling_pairwise
[ "region:us" ]
2023-06-06T21:26:29+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response_a", "dtype": "string"}, {"name": "response_b", "dtype": "string"}, {"name": "explanation", "dtype": "string"}, {"name": "preferred", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 11418828, "num_examples": 7721}], "download_size": 6134214, "dataset_size": 11418828}}
2023-06-06T21:26:38+00:00
76afe41d9af165cc40999fcaa92312b8b012064a
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/dbpedia-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:28:09+00:00
{"language": ["pl"]}
2023-06-07T07:12:53+00:00
a018c859bfb7e7001832e61605dd32daa0bd1c51
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/dbpedia-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:28:53+00:00
{"language": ["pl"]}
2023-06-07T07:12:37+00:00
81bcb408f33366c2a20ac54adafad1ae7e877fdd
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/trec-covid-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:34:50+00:00
{"language": ["pl"]}
2023-06-07T07:12:18+00:00
a65bb7e1a203fead968509e143432019793523cf
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/trec-covid-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:38:14+00:00
{"language": ["pl"]}
2023-06-07T07:11:44+00:00
9a6f9567fda928260afed2de480d79c98bf0bec0
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/nfcorpus-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:41:34+00:00
{"language": ["pl"]}
2023-06-07T07:11:26+00:00
963a8c51513374e39b9ec9c263036d6000647519
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/nfcorpus-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:44:12+00:00
{"language": ["pl"]}
2023-06-07T07:10:48+00:00
d0345ec2228c5e0d90e40eadb9f1addb9f4e1b22
# Dataset Card for "fire-srf" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vsrirama/fire-srf
[ "region:us" ]
2023-06-06T21:48:23+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 688502.0, "num_examples": 21}, {"name": "validation", "num_bytes": 280620.0, "num_examples": 16}], "download_size": 0, "dataset_size": 969122.0}}
2023-07-02T23:57:16+00:00
45452b03f05560207ef19149545f168e596c9337
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/scidocs-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:48:25+00:00
{"language": ["pl"]}
2023-06-07T07:10:24+00:00
746c40da1a8ff14a33e902969368ad97877d4ade
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
yuanzheng625/auto-retrain-input-dataset
[ "task_categories:image-classification", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "region:us" ]
2023-06-06T21:51:43+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "pretty_name": "tiny_demo1"}
2023-06-07T05:00:24+00:00
4f0f7d88314eacee80ecf3500cb6da8f36cc8060
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/scidocs-pl-qrels
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T21:52:13+00:00
{"language": ["pl"]}
2023-06-07T07:09:59+00:00
8b714e695c676b1dae2125aa4df58d52361515f9
# Dataset Card for "gan_decoding" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thomaslu/gan_decoding
[ "region:us" ]
2023-06-06T23:31:55+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 13114591.0, "num_examples": 100}, {"name": "train", "num_bytes": 52458378.0, "num_examples": 400}], "download_size": 32155767, "dataset_size": 65572969.0}}
2023-06-07T06:13:55+00:00
afe40fd74ac90a16caad8579a547512fc95c1a29
# Dataset Card for TROPICAL ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Configurations](#data-configurations) - [Use this Dataset](#use-this-dataset) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** [TROPICAL dataset repository](https://github.com/GePaSud/TROPICAL) - **Paper:** - **Point of Contact:** ### Dataset Summary The TROPICAL dataset is a French-language dataset for sentiment analysis. The dataset contains comments left by French-speaking tourists on TripAdvisor after their visit to French Polynesia; each review concerns either a hotel or a guesthouse. The format is JSON. The comments span January 2001 to April 2023, and the dataset contains 1592 comments along with 10729 ASTE triplets (aspect, opinion, sentiment). The unsplit dataset is available in our GitHub repository. ### Languages The text in the dataset is in French as it was written by French speakers. ## Dataset Structure ### Data Instances Normally the polarity of a triplet is either "POS", "NEG" or "NEU"; because the dataset uses [ClassLabel](https://huggingface.co/docs/datasets/v2.13.0/en/package_reference/main_classes#datasets.ClassLabel), the polarity is represented by 0, 1 or 2. | String label | Int label | | ------------ | --------- | | POS | 0 | | NEG | 1 | | NEU | 2 | An example from the TROPICAL original dataset looks like the following: ```json { "id_comment": "16752", "words": ["Nous", "avons", "passé", "4", "nuits", "dans", "cet", "établissement", "Ce", "fut", "un", "très", "bon", "moment", "Le", "personnel", "très", "aimable", "et", "serviable", "Nous", "avons", "visité", "les", "plantations", "d'ananas", "en", "4/4", "et", "ce", "fut", "un", "agréable", "moment", "nous", "avons", "fait", "le", "tour", "de", "l'île", "et", "c't", "une", "splendeur", "Nous", "sommes", "revenus", "enchantés"], "triplets": [ {"aspect_term": ["Aspect inexistant"], "opinion_term": ["revenus", "enchantés"], "aspect_position": [-1], "opinion_position": [47, 48], "polarity": "POS"}, {"aspect_term": ["tour", "de", "l'île"], "opinion_term": ["une", "splendeur"], "aspect_position": [38, 39, 40], "opinion_position": [43, 44], "polarity": "POS"}, {"aspect_term": ["moment"], "opinion_term": ["agréable"], "aspect_position": [33], "opinion_position": [32], "polarity": "POS"}, {"aspect_term": ["personnel"], "opinion_term": ["serviable"], "aspect_position": [15], "opinion_position": [19], "polarity": "POS"}, {"aspect_term": ["personnel"], "opinion_term": ["très", "aimable"], "aspect_position": [15], "opinion_position": [16, 17], "polarity": "POS"}, {"aspect_term": ["moment"], "opinion_term": ["très", "bon"], "aspect_position": [13], "opinion_position": [11, 12], "polarity": "POS"} ], "general_polarity": "POS" } ``` ### Data Fields - 'id_comment': a string containing the review id - 'words': an array of strings composing the comment - 'triplets': a list of dictionaries containing the following information - 'aspect_term': an array of strings composing the aspect term (can be a single word or a multi-word expression) - 'opinion_term': an array of strings composing the opinion term (can be a single word or a multi-word expression)
- 'aspect_position': an array of integers indicating the position of the aspect term in the words array (can contain a single integer or several integers) - 'opinion_position': an array of integers indicating the position of the opinion term in the review (can contain a single integer or several integers) - 'polarity': an integer, either _0_, _1_, or _2_, indicating a _positive_, _negative_, or _neutral_ sentiment, respectively - 'general_polarity': an integer, either _0_, _1_, or _2_, indicating a _positive_, _negative_, or _neutral_ sentiment, respectively ### Data Configurations The TROPICAL dataset has 3 configurations: _original_, _no overlapping_, and _overlapping_. The first one contains the 1592 comments. The overlapping dataset contains the comments that have at least one overlapping triplet. The no overlapping dataset contains the comments that have no overlapping triplet. | Dataset Configuration | Number of comments | Number of triplets | Positive triplets | Negative triplets | Neutral triplets | | --------------------- | ------------------ | ------------------ | ----------------- | ----------------- | -----------------| | original | 1,592 | 10,729 | 9,889 | 734 | 106 | | no_overlapping | 467 | 2,235 | 2,032 | 184 | 19 | | overlapping | 1,125 | 8,494 | 7,857 | 550 | 87 | The following table shows the splits of the dataset for all configurations: | Dataset Configuration | Train | Test | Val | | --------------------- | ----- | ---- | --- | | original | 1,114 | 239 | 239 | | no_overlapping | 326 | 70 | 71 | | overlapping | 787 | 169 | 169 | The split values for train, test, and validation are 70%, 15%, and 15% respectively. The seed used is 42. ## Use this Dataset ```python from datasets import load_dataset dataset = load_dataset("GePaSud/TROPICAL", "original_dataset") # or "no_overlapping_subset" or "overlapping_subset" ``` ## Dataset Creation ### Source Data All the comments were collected from the TripAdvisor website. The comments range from January 2001 to April 2023. The dataset contains 1592 comments along with 10729 ASTE triplets (aspect, opinion, sentiment). ### Who are the source language producers? The dataset contains tourists' comments about French Polynesia stored on the [TripAdvisor](https://www.tripadvisor.com/) website. ### Known limitations The dataset contains only comments about French Polynesia. Moreover, the dataset is not balanced: the number of positive triplets is much higher than the number of negative and neutral triplets. ## Additional Information ### Licensing Information The TROPICAL dataset is licensed under the [MIT License](https://opensource.org/licenses/MIT). ### Citation Information > To be added...
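As a minimal sketch of how the integer ClassLabel values above can be decoded back to their string labels, using the configuration names from this repository's metadata:

```python
from datasets import load_dataset

# Config names per the repo metadata: "original_dataset",
# "overlapping_subset", "no_overlapping_subset"
dataset = load_dataset("GePaSud/TROPICAL", "original_dataset", split="train")

# A ClassLabel feature knows its own label names, so 0/1/2 can be
# mapped back to "POS"/"NEG"/"NEU" without a hand-written table.
polarity_feature = dataset.features["general_polarity"]
example = dataset[0]
print(polarity_feature.int2str(example["general_polarity"]))
```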
GePaSud/TROPICAL
[ "task_categories:text-classification", "language:fr", "license:mit", "region:us" ]
2023-06-07T00:02:59+00:00
{"language": ["fr"], "license": "mit", "task_categories": ["text-classification"], "configs": [{"config_name": "original_dataset", "data_files": [{"split": "train", "path": "original_dataset/train.jsonl"}, {"split": "test", "path": "original_dataset/test.jsonl"}, {"split": "validation", "path": "original_dataset/val.jsonl"}]}, {"config_name": "overlapping_subset", "data_files": [{"split": "train", "path": "overlapping_subset/train.jsonl"}, {"split": "test", "path": "overlapping_subset/test.jsonl"}, {"split": "validation", "path": "overlapping_subset/val.jsonl"}]}, {"config_name": "no_overlapping_subset", "data_files": [{"split": "train", "path": "no_overlapping_subset/train.jsonl"}, {"split": "test", "path": "no_overlapping_subset/test.jsonl"}, {"split": "validation", "path": "no_overlapping_subset/val.jsonl"}]}], "dataset_info": [{"config_name": "original_dataset", "features": [{"name": "id_comment", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "triplets", "list": [{"name": "aspect_term", "sequence": "string"}, {"name": "opinion_term", "sequence": "string"}, {"name": "aspect_position", "sequence": "int32"}, {"name": "opinion_position", "sequence": "int32"}, {"name": "polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}]}, {"name": "general_polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}], "splits": [{"name": "train", "num_bytes": 1115671, "num_examples": 1114}, {"name": "test", "num_bytes": 239799, "num_examples": 239}, {"name": "validation", "num_bytes": 237621, "num_examples": 239}], "download_size": 2471854, "dataset_size": 1593091}, {"config_name": "no_overlapping_subset", "features": [{"name": "id_comment", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "triplets", "list": [{"name": "aspect_term", "sequence": "string"}, {"name": "opinion_term", "sequence": "string"}, {"name": "aspect_position", "sequence": "int32"}, {"name": "opinion_position", "sequence": "int32"}, {"name": "polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}]}, {"name": "general_polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}], "splits": [{"name": "train", "num_bytes": 270313, "num_examples": 326}, {"name": "test", "num_bytes": 61779, "num_examples": 70}, {"name": "validation", "num_bytes": 59399, "num_examples": 71}], "download_size": 581415, "dataset_size": 391491}, {"config_name": "overlapping_subset", "features": [{"name": "id_comment", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "triplets", "list": [{"name": "aspect_term", "sequence": "string"}, {"name": "opinion_term", "sequence": "string"}, {"name": "aspect_position", "sequence": "int32"}, {"name": "opinion_position", "sequence": "int32"}, {"name": "polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}]}, {"name": "general_polarity", "dtype": {"class_label": {"names": {"0": "POS", "1": "NEG", "2": "NEU"}}}}], "download_size": 1890439, "dataset_size": 1201600}]}
2023-11-23T02:14:36+00:00
54dbe1e2e677cd50feff00f62651c8de69178fe2
# Dataset Card for Dataset Name ### Dataset Summary This is simply databricks-dolly-15k-ja (*1) converted to JSONL form so that it can be used with the dataset_text_field property of SFTTrainer (*2). (*1) https://huggingface.co/datasets/kunishou/databricks-dolly-15k-ja (*2) https://huggingface.co/docs/trl/main/en/sft_trainer ### Languages ja ### Licensing Information This dataset is licensed under CC BY-SA 3.0. Special Thanks: https://huggingface.co/datasets/kunishou/databricks-dolly-15k-ja
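A minimal usage sketch with trl's SFTTrainer, assuming the converted JSONL exposes its text under a column named "text" (check the actual column name against the files); the base model here is purely illustrative, not one prescribed by this dataset:

```python
from datasets import load_dataset
from trl import SFTTrainer

dataset = load_dataset("Coaso/test-dolly-15ja-for-stftrainer", split="train")

# "text" is an assumed column name; the model id is an illustrative choice.
trainer = SFTTrainer(
    model="rinna/japanese-gpt-neox-3.6b",
    train_dataset=dataset,
    dataset_text_field="text",
)
trainer.train()
```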
Coaso/test-dolly-15ja-for-stftrainer
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:ja", "license:cc-by-sa-3.0", "region:us" ]
2023-06-07T00:40:58+00:00
{"language": ["ja"], "license": "cc-by-sa-3.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"]}
2023-06-07T01:29:14+00:00
be7cbb20e16c74b181eb3eef2f4abfe42e4d5640
# Chest X-Ray Pneumonia Dataset This dataset contains chest X-ray images of independent patients, each classified as `normal` (healthy) or `pneumonia` (diseased). This dataset is a processed version of the original `Large Dataset of Labeled Optical Coherence Tomography (OCT) and Chest X-Ray Images` dataset provided by the *University of California San Diego*. The dataset contains three splits: - **Train**: 4187 images - **Validation**: 1045 images - **Test**: 624 images The shape of the images is `[500, 500, 3]`, and the labels have two possible values: - 0: **Normal** - 1: **Pneumonia** >**References**: > > - Kermany, Daniel; Zhang, Kang; Goldbaum, Michael (2018), “Large Dataset of Labeled Optical Coherence Tomography (OCT) and Chest X-Ray Images”, Mendeley Data, V3, doi: 10.17632/rscbjbr9sj.3
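A minimal loading sketch, assuming standard `datasets` usage with this repository id:

```python
from datasets import load_dataset

# Splits: train (4187), validation (1045), test (624)
ds = load_dataset("mmenendezg/pneumonia_x_ray")

sample = ds["train"][0]
# label is a ClassLabel int: 0 = normal, 1 = pneumonia
print(sample["image"].size, ds["train"].features["label"].int2str(sample["label"]))
```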
mmenendezg/pneumonia_x_ray
[ "region:us" ]
2023-06-07T01:00:53+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "normal", "1": "pneumonia"}}}}], "splits": [{"name": "train", "num_bytes": 126906525.958, "num_examples": 4187}, {"name": "validation", "num_bytes": 27684376.78, "num_examples": 1045}, {"name": "test", "num_bytes": 16275405.0, "num_examples": 624}], "download_size": 153423742, "dataset_size": 170866307.738}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-12-05T00:01:34+00:00
94f0e5bf31393d1f7292510e854f1e0d577d4213
# EVJVQA - Multilingual Visual Question Answering ## Abstract Visual Question Answering (VQA) is a challenging task of natural language processing (NLP) and computer vision (CV), attracting significant attention from researchers. English is a resource-rich language that has witnessed various developments in datasets and models for visual question answering. Resources and models for visual question answering in other languages still need to be developed. In addition, there is no multilingual dataset targeting the visual content of a particular country with its own objects and cultural characteristics. To address this weakness, we provide the research community with a benchmark dataset named EVJVQA, including 33,000+ question-answer pairs in three languages: Vietnamese, English, and Japanese, over approximately 5,000 images taken in Vietnam, for evaluating multilingual VQA systems or models. EVJVQA is used as a benchmark dataset for the challenge of multilingual visual question answering at the 9th Workshop on Vietnamese Language and Speech Processing (VLSP 2022). This task attracted 62 participant teams from various universities and organizations. In this article, we present details of the organization of the challenge, an overview of the methods employed by shared-task participants, and the results. The highest performances are 0.4392 in F1-score and 0.4009 in BLEU on the private test set. The multilingual QA systems proposed by the top 2 teams use ViT as the pre-trained vision model and mT5, a powerful pre-trained language model based on the transformer architecture, as the language model. EVJVQA is a challenging dataset that motivates NLP and CV researchers to further explore multilingual models or systems for visual question answering. We released the challenge on the CodaLab evaluation system for further research. ## Links - https://arxiv.org/abs/2302.11752 - https://codalab.lisn.upsaclay.fr/competitions/12274
dinhanhx/evjvqa
[ "task_categories:visual-question-answering", "task_ids:visual-question-answering", "language:en", "language:vi", "language:ja", "license:unknown", "evjvqa", "arxiv:2302.11752", "region:us" ]
2023-06-07T01:36:55+00:00
{"language": ["en", "vi", "ja"], "license": "unknown", "task_categories": ["visual-question-answering"], "task_ids": ["visual-question-answering"], "pretty_name": "EVJVQA - Multilingual Visual Question Answering", "source-datasets": ["original"], "tags": ["evjvqa"]}
2023-06-24T00:55:42+00:00
51dcab02d911ef8ae010bab67be7bca12c7e121a
# Gaze 360 All recorded gaze data are in the 'Gaze_txt_files' directory. Under this directory, the data for one person is placed in one subdirectory, and the data for one video is stored in one txt file. Each line in a txt file is organized as follows: "frame, frame index, forward, head position x, head position y, eye, gaze position x, gaze position y", where the frame index starts from 1. Head position and gaze position are the positions of the head and eye in the panorama image; they are fractional values from 0.0 to 1.0 with respect to the panorama image, computed from the bottom-left corner. The 360-degree videos are in 'videos' and are coming soon.
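A minimal parsing sketch for the line format quoted above; the file path is hypothetical and the field indices simply follow the order of the quoted string, so verify them against a real file:

```python
# Each line: "frame, frame index, forward, head position x, head position y,
#             eye, gaze position x, gaze position y"
records = []
with open("Gaze_txt_files/person_01/video_01.txt") as f:  # hypothetical path
    for line in f:
        fields = [s.strip() for s in line.split(",")]
        records.append({
            "frame_index": int(fields[1]),                    # starts from 1
            "head_xy": (float(fields[3]), float(fields[4])),  # fractional, from the bottom-left corner
            "gaze_xy": (float(fields[6]), float(fields[7])),
        })
```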
Morning5/Gaze360
[ "region:us" ]
2023-06-07T02:23:05+00:00
{}
2023-06-07T02:41:24+00:00
f7505131eebf9ef1b0435015563a168716059ed2
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
Vinomaly/1k-sample-comex-split
[ "task_categories:feature-extraction", "task_categories:text-generation", "size_categories:1K<n<10K", "language:es", "region:us" ]
2023-06-07T02:27:21+00:00
{"language": ["es"], "size_categories": ["1K<n<10K"], "task_categories": ["feature-extraction", "text-generation"]}
2023-06-07T02:30:06+00:00
fa36079d15a9a4b78fca051867dafee01b14b8f4
# Dataset Card for "gen.4.cats.book" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lansinuote/gen.4.cats.book
[ "region:us" ]
2023-06-07T02:33:30+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "cls", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 87520550.5, "num_examples": 2500}], "download_size": 0, "dataset_size": 87520550.5}}
2023-06-07T03:04:02+00:00
6d0786c75a952ef61f38497d0a20003498c69abc
# Dataset Card for "ah_openai_dialog_v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/ah_openai_dialog_v3
[ "region:us" ]
2023-06-07T02:35:58+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "num_comments", "dtype": "int64"}, {"name": "name", "dtype": "int64"}, {"name": "title", "dtype": "int64"}, {"name": "body", "dtype": "int64"}, {"name": "score", "dtype": "int64"}, {"name": "upvote_ratio", "dtype": "int64"}, {"name": "distinguished", "dtype": "int64"}, {"name": "over_18", "dtype": "int64"}, {"name": "created_utc", "dtype": "int64"}, {"name": "comments", "dtype": "int64"}, {"name": "best_num_comments", "dtype": "int64"}, {"name": "query", "dtype": "int64"}, {"name": "dialog", "dtype": "int64"}, {"name": "dialog_success", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 332544, "num_examples": 2598}], "download_size": 237297, "dataset_size": 332544}}
2023-06-07T02:36:05+00:00
19e9b28ab07eb1df1b0d74ae1fa4376d30df037a
Vinomaly/1k-sample-comex
[ "task_categories:feature-extraction", "task_categories:text-generation", "size_categories:1K<n<10K", "language:es", "region:us" ]
2023-06-07T02:41:23+00:00
{"language": ["es"], "size_categories": ["1K<n<10K"], "task_categories": ["feature-extraction", "text-generation"]}
2023-06-07T02:42:21+00:00
c3007bb3068ed40cf815673e9c45030ffe673e0e
Kamaljp/krea-openprompt
[ "license:cc-by-2.0", "region:us" ]
2023-06-07T03:02:13+00:00
{"license": "cc-by-2.0", "dataset_info": {"features": [{"name": "seq1", "dtype": "string"}, {"name": "seq2", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 137013, "num_examples": 700}, {"name": "test", "num_bytes": 36793, "num_examples": 194}], "download_size": 112795, "dataset_size": 173806}}
2023-06-07T06:13:15+00:00
6639f20375e2f3e9ef1225bc1110b650d49dabcf
Odir/train-dmphase-pub
[ "license:afl-3.0", "region:us" ]
2023-06-07T03:05:34+00:00
{"license": "afl-3.0"}
2023-06-07T03:06:15+00:00
1c06e0f072ded280afe56fbaeb5e1be0b2cddf9e
# Dataset Card for "reddit_ah_dialog_turns" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/reddit_ah_dialog_turns_train
[ "region:us" ]
2023-06-07T03:15:52+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "speaker", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3816676, "num_examples": 16055}], "download_size": 2118321, "dataset_size": 3816676}}
2023-06-07T03:16:05+00:00
538b0243e99a4101ea56f2a0310d1e007270ae4f
# Dataset Card for "ah_openai_dialog_annotation_v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/ah_openai_dialog_annotation_v3
[ "region:us" ]
2023-06-07T04:01:04+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "num_comments", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "body", "dtype": "string"}, {"name": "score", "dtype": "int64"}, {"name": "upvote_ratio", "dtype": "float64"}, {"name": "distinguished", "dtype": "string"}, {"name": "over_18", "dtype": "bool"}, {"name": "created_utc", "dtype": "int64"}, {"name": "comments", "list": [{"name": "body", "dtype": "string"}, {"name": "created_utc", "dtype": "float64"}, {"name": "distinguished", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "permalink", "dtype": "string"}, {"name": "score", "dtype": "int64"}]}, {"name": "best_num_comments", "dtype": "int64"}, {"name": "query", "dtype": "string"}, {"name": "dialog", "dtype": "string"}, {"name": "dialog_success", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "annotation_error", "dtype": "bool"}, {"name": "annotation", "struct": [{"name": "Error", "dtype": "string"}, {"name": "Success", "dtype": "bool"}, {"name": "success", "dtype": "bool"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 30690901, "num_examples": 2598}], "download_size": 17407400, "dataset_size": 30690901}}
2023-06-07T04:01:33+00:00
88fabbb0a3792c42a205ffdba90a4d0bff4a52ee
# Dataset Card for "ah_full_dialog_annotation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/ah_full_dialog_annotation
[ "region:us" ]
2023-06-07T04:02:38+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "num_comments", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "body", "dtype": "string"}, {"name": "score", "dtype": "int64"}, {"name": "upvote_ratio", "dtype": "float64"}, {"name": "distinguished", "dtype": "string"}, {"name": "over_18", "dtype": "bool"}, {"name": "created_utc", "dtype": "float64"}, {"name": "comments", "list": [{"name": "body", "dtype": "string"}, {"name": "created_utc", "dtype": "float64"}, {"name": "distinguished", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "permalink", "dtype": "string"}, {"name": "score", "dtype": "int64"}]}, {"name": "best_num_comments", "dtype": "int64"}, {"name": "query", "dtype": "string"}, {"name": "dialog", "dtype": "string"}, {"name": "dialog_success", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "float64"}, {"name": "annotation_error", "dtype": "bool"}, {"name": "annotation", "struct": [{"name": "Error", "dtype": "string"}, {"name": "Success", "dtype": "bool"}, {"name": "success", "dtype": "bool"}, {"name": "text", "dtype": "string"}]}, {"name": "Error", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 33886049, "num_examples": 2921}], "download_size": 19222113, "dataset_size": 33886049}}
2023-06-07T04:02:58+00:00
9a6880a95a2a96c0cb1c18d3b82d137f3688e717
logxksr/smallvideo
[ "license:mit", "region:us" ]
2023-06-07T04:06:17+00:00
{"license": "mit"}
2023-06-07T04:13:59+00:00
68958e98267f5fb4a52a03ebcdae4ae59213fa7c
Dataset for [LIMA: Less Is More for Alignment](https://arxiv.org/pdf/2305.11206.pdf) ## Usage ```python from datasets import load_dataset dataset = load_dataset("GAIR/lima") ``` ## License If the source data of LIMA has a stricter license than CC BY-NC-SA, the LIMA dataset follows the same. Otherwise, it follows the CC BY-NC-SA license.
GAIR/lima
[ "license:other", "arxiv:2305.11206", "region:us" ]
2023-06-07T04:16:04+00:00
{"license": "other"}
2023-06-08T01:40:19+00:00
70a52342ded273b4ecfac44541619d77e0effde3
# Sakura_dataset An ultra-small, high-quality Japanese dataset that allows commercial use. The categories are as follows: - commonsense_qa: commonsense questions - Calc-ape210k: math problems - japanese-commonsense-openqa: Japanese commonsense questions (self-made) The following datasets are used: - [commonsense_qa](https://huggingface.co/datasets/commonsense_qa) - [MU-NLPC/Calc-ape210k](https://huggingface.co/datasets/MU-NLPC/Calc-ape210k) ## LICENSE This dataset is licensed under the Database Contents License (DbCL) v1.0 ## Update Last Update: 2023-06-07 ## Example Code ```python # Load the model import os from peft.utils.config import TaskType os.environ["CUDA_VISIBLE_DEVICES"]="0" import peft import transformers import datasets # Basic parameters model_name = "rinna/japanese-gpt-neox-3.6b" dataset = "saldra/sakura_japanese_dataset" is_dataset_local = False peft_name = "lora-rinna-3.6b-sakura_dataset" output_dir = "lora-rinna-3.6b-sakura_dataset-results" # Training parameters eval_steps = 50 #200 save_steps = 400 #200 logging_steps = 400 #20 max_steps = 400 # 4881 for dolly # Prepare the dataset data = datasets.load_dataset(dataset) CUTOFF_LEN = 512 # upper limit of the context length tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_fast=False) model = transformers.AutoModelForCausalLM.from_pretrained( model_name, device_map='auto', load_in_8bit=True, ) model.enable_input_require_grads() model.gradient_checkpointing_enable() config = peft.LoraConfig( r=8, lora_alpha=32, lora_dropout=0.01, inference_mode=False, task_type=TaskType.CAUSAL_LM, ) model = peft.get_peft_model(model, config) # Tokenization def tokenize(prompt, tokenizer): result = tokenizer( prompt, truncation=True, max_length=CUTOFF_LEN, padding=False, ) return { "input_ids": result["input_ids"], "attention_mask": result["attention_mask"], } # Prepare the prompt template def generate_prompt(data_point): result = f'### 指示:\n{data_point["instruction"]}\n\n### 回答:\n{data_point["output"]}' # For rinna/japanese-gpt-neox-3.6b, newlines must be converted to <NL> result = result.replace('\n', '<NL>') return result VAL_SET_SIZE = 0.1 # fraction of validation data (float) # Prepare the training and validation data train_val = data["train"].train_test_split( test_size=VAL_SET_SIZE, shuffle=True, seed=42 ) train_data = train_val["train"] train_data = train_data.shuffle().map(lambda x: tokenize(generate_prompt(x), tokenizer)) val_data = train_val["test"] val_data = val_data.shuffle().map(lambda x: tokenize(generate_prompt(x), tokenizer)) trainer = transformers.Trainer( model=model, train_dataset=train_data, eval_dataset=val_data, args=transformers.TrainingArguments( num_train_epochs=3, learning_rate=3e-4, logging_steps=logging_steps, evaluation_strategy="steps", save_strategy="steps", max_steps=max_steps, eval_steps=eval_steps, save_steps=save_steps, output_dir=output_dir, report_to="none", save_total_limit=3, push_to_hub=False, auto_find_batch_size=True ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() # Save the LoRA model trainer.model.save_pretrained(peft_name) print("Done!") ```
saldra/sakura_japanese_dataset
[ "task_categories:question-answering", "size_categories:n<1K", "language:ja", "license:other", "region:us" ]
2023-06-07T04:44:23+00:00
{"language": ["ja"], "license": "other", "size_categories": ["n<1K"], "task_categories": ["question-answering"], "pretty_name": "sakura_japanese_dataset"}
2023-06-08T10:31:06+00:00
9ef959cb57896e6fd7cbf41a959990b969630fbb
# Dataset Card for "Sample_vqa_test_for_colab" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Sample_vqa_test_for_colab
[ "region:us" ]
2023-06-07T04:47:34+00:00
{"dataset_info": {"features": [{"name": "question_type", "dtype": "string"}, {"name": "multiple_choice_answer", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "answers_original", "list": [{"name": "answer", "dtype": "string"}, {"name": "answer_confidence", "dtype": "string"}, {"name": "answer_id", "dtype": "int64"}]}, {"name": "id_image", "dtype": "int64"}, {"name": "answer_type", "dtype": "string"}, {"name": "question_id", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 1599032.0, "num_examples": 10}], "download_size": 1594854, "dataset_size": 1599032.0}}
2023-06-07T04:47:36+00:00
f202a892d69a55e595e870769e6a2d79c623bb61
# Dataset Card for "Sample_vqa_test_for_colab" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/Sample_vqa_test_for_colab
[ "region:us" ]
2023-06-07T04:48:45+00:00
{"dataset_info": {"features": [{"name": "question_type", "dtype": "string"}, {"name": "multiple_choice_answer", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "answers_original", "list": [{"name": "answer", "dtype": "string"}, {"name": "answer_confidence", "dtype": "string"}, {"name": "answer_id", "dtype": "int64"}]}, {"name": "id_image", "dtype": "int64"}, {"name": "answer_type", "dtype": "string"}, {"name": "question_id", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "DETA_detections_deta_swin_large_o365", "list": [{"name": "box", "sequence": "float32"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float32"}, {"name": "size", "dtype": "string"}]}, {"name": "blip_caption_False_beams_5_Salesforce_blip_image_captioning_large_max_length_30_hf", "dtype": "string"}, {"name": "blip_caption_Salesforce_blip_image_captioning_large_intensive", "sequence": "string"}, {"name": "DETA_detections_deta_swin_large_o365_caption_all_patches_Salesforce_blip_image_captioning_large_", "list": [{"name": "box", "sequence": "float64"}, {"name": "captions_all_patches", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}]}, {"name": "clip_tags_ViT_L_14_with_openai", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2746703.0, "num_examples": 10}], "download_size": 2136539, "dataset_size": 2746703.0}}
2023-06-08T03:55:07+00:00
800d312b93e8ea87da87644d9ee6886c33674e5b
JennnDexter/ddpm-butterflies-128
[ "license:unknown", "region:us" ]
2023-06-07T05:05:19+00:00
{"license": "unknown"}
2023-06-07T05:05:19+00:00
53670062d93b2eee85a5d2450cd1c5fd2726af7a
Dataset Summary - Scraped news articles from Utusan Borneo on 27.5.2023 - All Malay articles Dataset Format ``` {"url": "...", "content": [...,...]} ```
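A minimal reading sketch, assuming one JSON record per line in the format above and that the "content" list holds text segments; the local file name is hypothetical:

```python
import json

articles = []
with open("utusanborneo.jsonl") as f:  # hypothetical file name
    for line in f:
        record = json.loads(line)
        # Join the content segments into one article body
        articles.append({"url": record["url"], "text": " ".join(record["content"])})
```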
aisyahhrazak/ms-news-utusanborneo
[ "language:ms", "region:us" ]
2023-06-07T05:25:00+00:00
{"language": ["ms"]}
2023-06-29T03:00:06+00:00
eacfe8da08bcabe05a7c447b5a22022db81005f0
# Dataset Card for "hateful_memes_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
emily49/hateful_memes_test
[ "region:us" ]
2023-06-07T05:31:33+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "img", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 361453801.0, "num_examples": 1000}], "download_size": 361444198, "dataset_size": 361453801.0}}
2023-06-07T20:23:35+00:00
eee9ce6cba55045fe7a4275a4c473d40c10daae3
BenShermaister/VTTFPBS
[ "license:cc0-1.0", "region:us" ]
2023-06-07T05:47:54+00:00
{"license": "cc0-1.0"}
2023-07-01T13:17:50+00:00
65ef6e3086515381211d5c2afdbf5d1de3679a2e
# Dataset Card for "OSCAR-2301" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vietgpt-archive/OSCAR-2301
[ "region:us" ]
2023-06-07T05:51:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "perplexity", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 141524370921, "num_examples": 14802472}], "download_size": 68133953790, "dataset_size": 141524370921}}
2023-06-10T18:35:24+00:00
c27ea2bdc0d6924dedf28c504900d0243fe7814d
waleedfarooq51/custom_data
[ "language:en", "region:us" ]
2023-06-07T05:55:43+00:00
{"language": ["en"]}
2023-06-08T11:10:51+00:00
0d759c6036f7bcdc93aa7f81a7db7ee3630f3024
csdc-atl/query-document-retrieval-full
[ "license:cc-by-sa-4.0", "region:us" ]
2023-06-07T06:04:51+00:00
{"license": "cc-by-sa-4.0", "dataset_info": {"features": [{"name": "query", "sequence": "string"}, {"name": "positive", "dtype": "string"}, {"name": "negative", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 7739527819, "num_examples": 468802}], "download_size": 3196560243, "dataset_size": 7739527819}}
2023-06-07T06:16:07+00:00
222767cd32853167788b0224a26551755cd4549a
# Dataset Card for "pixel_squad_cannon" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nadav/pixel_squad_cannon
[ "region:us" ]
2023-06-07T06:07:37+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"array2_d": {"shape": [23, 23], "dtype": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 7614068486.344, "num_examples": 222844}, {"name": "test", "num_bytes": 410519961.528, "num_examples": 11873}], "download_size": 7881628043, "dataset_size": 8024588447.872}}
2023-06-07T06:19:37+00:00
d9f675e0638583f2d28f6481133bd7e58c695dce
﷽ # Dataset Card for Tarteel AI's EveryAyah Dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Tarteel AI](https://www.tarteel.ai/) - **Repository:** [Needs More Information] - **Point of Contact:** [Mohamed Saad Ibn Seddik](mailto:[email protected]) ### Dataset Summary This dataset is a collection of Quranic verses and their transcriptions, with diacritization, by different reciters. ### Supported Tasks and Leaderboards [Needs More Information] ### Languages The audio is in Arabic. ## Dataset Structure ### Data Instances A typical data point comprises the audio file `audio` and its transcription `text`. The `duration` is in seconds, and the reciter is given in `reciter`. An example from the dataset is: ``` { 'audio': { 'path': None, 'array': array([ 0. , 0. , 0. , ..., -0.00057983, -0.00085449, -0.00061035]), 'sampling_rate': 16000 }, 'duration': 6.478375, 'text': 'بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيمِ', 'reciter': 'abdulsamad' } ``` ### Length - Training: 2,985,111.26 seconds (49,751.85 minutes / 829.20 hours) - Validation: 372,720.43 seconds (6,212.01 minutes / 103.53 hours) - Test: 375,509.97 seconds (6,258.50 minutes / 104.31 hours) ### Data Fields - audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - text: The transcription of the audio file. - duration: The duration of the audio file. - reciter: The reciter of the verses. 
### Data Splits | | Train | Test | Validation | | ----- | ----- | ---- | ---------- | | dataset | 187785 | 23473 | 23474 | ### reciters - reciters_count: 36 - reciters: {'abdul_basit', 'abdullah_basfar', 'abdullah_matroud', 'abdulsamad', 'abdurrahmaan_as-sudais', 'abu_bakr_ash-shaatree', 'ahmed_ibn_ali_al_ajamy', 'ahmed_neana', 'akram_alalaqimy', 'alafasy', 'ali_hajjaj_alsuesy', 'aziz_alili', 'fares_abbad', 'ghamadi', 'hani_rifai', 'husary', 'karim_mansoori', 'khaalid_abdullaah_al-qahtaanee', 'khalefa_al_tunaiji', 'maher_al_muaiqly', 'mahmoud_ali_al_banna', 'menshawi', 'minshawi', 'mohammad_al_tablaway', 'muhammad_abdulkareem', 'muhammad_ayyoub', 'muhammad_jibreel', 'muhsin_al_qasim', 'mustafa_ismail', 'nasser_alqatami', 'parhizgar', 'sahl_yassin', 'salaah_abdulrahman_bukhatir', 'saood_ash-shuraym', 'yaser_salamah', 'yasser_ad-dussary'} ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators ### Licensing Information [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` ``` ### Contributions This dataset was created by:
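As a minimal sketch of the access pattern recommended above (index the row first so only that one audio file is decoded):

```python
from datasets import load_dataset

ds = load_dataset("Salama1429/tarteel-ai-everyayah-Quran", split="validation")

# Prefer ds[0]["audio"] over ds["audio"][0]: only this row's file is decoded
row = ds[0]
print(row["reciter"], row["duration"], row["audio"]["sampling_rate"])
print(row["text"])
```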
Salama1429/tarteel-ai-everyayah-Quran
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:ar", "license:mit", "region:us" ]
2023-06-07T06:15:22+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["crowdsourced"], "language": ["ar"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "paperswithcode_id": "tarteel-everyayah", "pretty_name": "Tarteel AI - EveryAyah Dataset", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "duration", "dtype": "float64"}, {"name": "text", "dtype": "string"}, {"name": "reciter", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 262627688145.3, "num_examples": 187785}, {"name": "test", "num_bytes": 25156009734.72, "num_examples": 23473}, {"name": "validation", "num_bytes": 23426886730.218, "num_examples": 23474}], "download_size": 117190597305, "dataset_size": 311210584610.23804}, "train-eval-index": [{"config": "clean", "task": "automatic-speech-recognition", "task_id": "speech_recognition", "splits": {"train_split": "train", "eval_split": "test", "validation_split": "validation"}, "col_mapping": {"audio": "audio", "text": "text", "reciter": "text"}, "metrics": [{"type": "wer", "name": "WER"}, {"type": "cer", "name": "CER"}]}]}
2023-06-07T13:17:32+00:00
0395a7a23095b887c040c6873e6eedf1cbc0f37f
Please email us ([email protected]) to explain your identity and purpose before requesting access. *Direct requests will not be approved.* *Please make sure that all data are used for research only.* *Please note that raw videos of the training set, which are from YouTube, are available now.* GitHub: https://github.com/DeepLearnXMU/BigVideo-VMT
fringek/BigVideo
[ "region:us" ]
2023-06-07T06:30:43+00:00
{}
2024-02-17T17:31:43+00:00
dc186312593122a47b373cfaa8fadbd8ff4f6a57
SilpaCS/Augmented_alzheimer
[ "task_categories:image-classification", "size_categories:10K<n<100K", "language:en", "medical", "region:us" ]
2023-06-07T06:34:13+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["image-classification"], "tags": ["medical"]}
2023-06-07T06:56:55+00:00
31ed390aec4144c67febb3bbb1046230e302bf71
chtan0212/test1
[ "task_categories:token-classification", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "region:us" ]
2023-06-07T06:38:31+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["token-classification"], "pretty_name": "test 1 pretty name"}
2023-06-07T06:41:37+00:00
b329b81989a8af34b83671c04b47f7c020fca47a
# Dataset Card for "Ashaar_tafeelah" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/Ashaar_tafeelah
[ "region:us" ]
2023-06-07T06:43:18+00:00
{"dataset_info": {"features": [{"name": "sequence", "dtype": "string"}, {"name": "tafeelah", "dtype": "string"}, {"name": "meter", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78684, "num_examples": 986}], "download_size": 18630, "dataset_size": 78684}}
2023-06-07T07:06:09+00:00
4339afe58faf1c970d2898174ec1e93e66f0bbcf
# Dataset Card for "bender-blip2-captions-512" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Norod78/bender-blip2-captions-512
[ "language:en", "region:us" ]
2023-06-07T06:52:03+00:00
{"language": "en", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 64037080.0, "num_examples": 260}], "download_size": 63813431, "dataset_size": 64037080.0}}
2023-07-16T11:11:46+00:00
419fefd46af510e61787db7de7f893ad6f71a45f
# Dataset Card for "tarteel-ai-EA-UD" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Salama1429/tarteel-ai-EA-UD
[ "region:us" ]
2023-06-07T07:02:47+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "duration", "dtype": "float64"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 119515195286.768, "num_examples": 234732}], "download_size": 2637413320, "dataset_size": 119515195286.768}}
2023-06-07T13:29:13+00:00
7008605b200d6271b24d9b01bbf206e18df009fc
dputilov/TTL
[ "license:other", "region:us" ]
2023-06-07T07:06:50+00:00
{"license": "other"}
2024-02-14T18:36:58+00:00
d29dbb68b1d846fbece23656730918a2c8baeb49
# Dataset Card for "Ashaar_ardui" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/Ashaar_aruid_v0
[ "region:us" ]
2023-06-07T07:07:20+00:00
{"dataset_info": {"features": [{"name": "sequence", "dtype": "string"}, {"name": "tafeelah", "dtype": "string"}, {"name": "meter", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78684, "num_examples": 986}], "download_size": 18630, "dataset_size": 78684}}
2023-06-07T07:07:21+00:00
aacb04317dc7701e786bfa6335f2bf212c7fd455
# Dataset Card for "tarteel-ai-EA-DI" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Salama1429/tarteel-ai-EA-DI
[ "region:us" ]
2023-06-07T07:10:38+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "duration", "dtype": "float64"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 134530727598.282, "num_examples": 245093}], "download_size": 5374089950, "dataset_size": 134530727598.282}}
2023-06-07T12:20:30+00:00
4122770c68b441b1d048fa1c0f18ca7375bc5afc
> This dataset is identical to **[cdminix/libritts-aligned](https://huggingface.co/datasets/cdminix/libritts-aligned)** except it uses the newly released LibriTTS-R corpus. Please cite **[Y. Koizumi, et al., "LibriTTS-R: Restoration of a Large-Scale Multi-Speaker TTS Corpus", Interspeech 2023](https://google.github.io/df-conformer/librittsr/)** *When using this dataset to download LibriTTS-R, make sure you agree to the terms on https://www.openslr.org* # Dataset Card for LibriTTS-R with Forced Alignments (and Measures) This dataset downloads LibriTTS-R and preprocesses it on your machine to create alignments using [montreal forced aligner](https://montreal-forced-aligner.readthedocs.io/en/latest/). You need to run ``pip install alignments phones`` before using this dataset. When running this the first time, it can take an hour or two, but subsequent runs will be lightning fast. ## Requirements - ``pip install alignments phones`` **(required)** - ``pip install speech-collator`` (optional) *Note: version >=0.0.15 of alignments is required for this corpus* ## Example Item ```json { 'id': '100_122655_000073_000002.wav', 'speaker': '100', 'text': 'the day after, diana and mary quitted it for distant b.', 'start': 0.0, 'end': 3.6500000953674316, 'phones': ['[SILENCE]', 'ð', 'ʌ', '[SILENCE]', 'd', 'eɪ', '[SILENCE]', 'æ', 'f', 't', 'ɜ˞', '[COMMA]', 'd', 'aɪ', 'æ', 'n', 'ʌ', '[SILENCE]', 'æ', 'n', 'd', '[SILENCE]', 'm', 'ɛ', 'ɹ', 'i', '[SILENCE]', 'k', 'w', 'ɪ', 't', 'ɪ', 'd', '[SILENCE]', 'ɪ', 't', '[SILENCE]', 'f', 'ɜ˞', '[SILENCE]', 'd', 'ɪ', 's', 't', 'ʌ', 'n', 't', '[SILENCE]', 'b', 'i', '[FULL STOP]'], 'phone_durations': [5, 2, 4, 0, 5, 13, 0, 16, 7, 5, 20, 2, 6, 9, 15, 4, 2, 0, 11, 3, 5, 0, 3, 8, 9, 8, 0, 13, 3, 5, 3, 6, 4, 0, 8, 5, 0, 9, 5, 0, 7, 5, 6, 7, 4, 5, 10, 0, 3, 35, 9], 'audio': '/dev/shm/metts/train-clean-360-alignments/100/100_122655_000073_000002.wav' } ``` The phones are IPA phones, and the phone durations are in frames (assuming a hop length of 256, sample rate of 22050 and window length of 1024). These attributes can be changed using the ``hop_length``, ``sample_rate`` and ``window_length`` arguments to ``LibriTTSAlign``. ## Data Collator This dataset comes with a data collator which can be used to create batches of data for training. 
It can be installed using ``pip install speech-collator`` ([MiniXC/speech-collator](https://www.github.com/MiniXC/speech-collator)) and can be used as follows: ```python import json from datasets import load_dataset from speech_collator import SpeechCollator from torch.utils.data import DataLoader dataset = load_dataset('cdminix/libritts-r-aligned', split="train") speaker2idx = json.load(open("speaker2idx.json")) phone2idx = json.load(open("phone2idx.json")) collator = SpeechCollator( speaker2idx=speaker2idx, phone2idx=phone2idx, ) dataloader = DataLoader(dataset, collate_fn=collator.collate_fn, batch_size=8) ``` You can either download the ``speaker2idx.json`` and ``phone2idx.json`` files from [here](https://huggingface.co/datasets/cdminix/libritts-aligned/tree/main/data) or create them yourself using the following code: ```python import json from datasets import load_dataset from speech_collator import SpeechCollator, create_speaker2idx, create_phone2idx dataset = load_dataset("cdminix/libritts-r-aligned", split="train") # Create speaker2idx and phone2idx speaker2idx = create_speaker2idx(dataset, unk_idx=0) phone2idx = create_phone2idx(dataset, unk_idx=0) # save to json with open("speaker2idx.json", "w") as f: json.dump(speaker2idx, f) with open("phone2idx.json", "w") as f: json.dump(phone2idx, f) ``` ### Measures When using ``speech-collator`` you can also use the ``measures`` argument to specify which measures to use. The following example extracts Pitch and Energy on the fly. ```python import json from torch.utils.data import DataLoader from datasets import load_dataset from speech_collator import SpeechCollator, create_speaker2idx, create_phone2idx from speech_collator.measures import PitchMeasure, EnergyMeasure dataset = load_dataset("cdminix/libritts-r-aligned", split="train") speaker2idx = json.load(open("data/speaker2idx.json")) phone2idx = json.load(open("data/phone2idx.json")) # Create SpeechCollator speech_collator = SpeechCollator( speaker2idx=speaker2idx, phone2idx=phone2idx, measures=[PitchMeasure(), EnergyMeasure()], return_keys=["measures"] ) # Create DataLoader dataloader = DataLoader( dataset, batch_size=8, collate_fn=speech_collator.collate_fn, ) ``` COMING SOON: Detailed documentation on how to use the measures at [MiniXC/speech-collator](https://www.github.com/MiniXC/speech-collator). ## Splits This dataset has the following splits: - ``train``: All the training data, except one sample per speaker which is used for validation. - ``dev``: The validation data, one sample per speaker. - ``train.clean.100``: Training set derived from the original materials of the train-clean-100 subset of LibriSpeech. - ``train.clean.360``: Training set derived from the original materials of the train-clean-360 subset of LibriSpeech. - ``train.other.500``: Training set derived from the original materials of the train-other-500 subset of LibriSpeech. - ``dev.clean``: Validation set derived from the original materials of the dev-clean subset of LibriSpeech. - ``dev.other``: Validation set derived from the original materials of the dev-other subset of LibriSpeech. - ``test.clean``: Test set derived from the original materials of the test-clean subset of LibriSpeech. - ``test.other``: Test set derived from the original materials of the test-other subset of LibriSpeech. ## Environment Variables There are a few environment variables which can be set. - ``LIBRITTS_VERBOSE``: If set, will print out more information about the dataset creation process. 
- ``LIBRITTS_MAX_WORKERS``: The number of workers to use when creating the alignments. Defaults to ``cpu_count()``. - ``LIBRITTS_PATH``: The path to download LibriTTS to. Defaults to the value of ``HF_DATASETS_CACHE``. ## Citation When using LibriTTS-R, please cite the following papers: - [LibriTTS-R: Restoration of a Large-Scale Multi-Speaker TTS Corpus](https://google.github.io/df-conformer/librittsr/) - [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://arxiv.org/abs/1904.02882) - [Montreal Forced Aligner: Trainable text-speech alignment using Kaldi](https://www.researchgate.net/publication/319185277_Montreal_Forced_Aligner_Trainable_Text-Speech_Alignment_Using_Kaldi) When using the Measures, please cite the following paper (ours): - [Evaluating and reducing the distance between synthetic and real speech distributions](https://arxiv.org/abs/2211.16049)
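## Quick Start For completeness, here is a minimal end-to-end sketch combining the environment variables above with dataset loading; the specific values and the local path are illustrative assumptions, not requirements.

```python
import os

# Configure the loader via the environment variables described above
# *before* loading the dataset (the values here are illustrative assumptions).
os.environ["LIBRITTS_VERBOSE"] = "1"               # more logging during creation
os.environ["LIBRITTS_MAX_WORKERS"] = "8"           # alignment workers
os.environ["LIBRITTS_PATH"] = "/data/libritts_r"   # assumed download location

from datasets import load_dataset

dataset = load_dataset("cdminix/libritts-r-aligned", split="train")
# "id" and "text" are fields shown in the Example Item section above.
print(dataset[0]["id"], dataset[0]["text"])
```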
cdminix/libritts-r-aligned
[ "task_categories:automatic-speech-recognition", "task_categories:text-to-speech", "annotations_creators:crowdsourced", "language:en", "license:cc-by-4.0", "speech", "audio", "automatic-speech-recognition", "text-to-speech", "arxiv:1904.02882", "arxiv:2211.16049", "region:us" ]
2023-06-07T07:35:07+00:00
{"annotations_creators": ["crowdsourced"], "language": "en", "license": ["cc-by-4.0"], "task_categories": ["automatic-speech-recognition", "text-to-speech"], "pretty_name": "LibriTTS Corpus with Forced Alignments", "tags": ["speech", "audio", "automatic-speech-recognition", "text-to-speech"], "extra_gated_prompt": "When using this dataset to download LibriTTS, you agree to the terms on https://www.openslr.org"}
2023-07-02T14:13:39+00:00
c04c8161fd151b4313a010e0f6a71b95dc0f67c9
https://github.com/google-research/language/tree/master/language/wino_dict ```@inproceedings{51779, title = {WinoDict: Probing language models for in-context language acquisition}, author = {Fangyu Liu and Jeremy Cole and Julian Martin Eisenschlos and William Weston Cohen}, year = {2022}, URL = {https://arxiv.org/abs/2209.12153}, booktitle = {EACL} } ```
tasksource/winodict
[ "language:en", "license:cc-by-4.0", "arxiv:2209.12153", "region:us" ]
2023-06-07T07:48:52+00:00
{"language": "en", "license": "cc-by-4.0", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "lemma", "dtype": "string"}, {"name": "fake_lemma", "dtype": "string"}, {"name": "pos", "dtype": "string"}, {"name": "tag", "dtype": "string"}, {"name": "pronoun", "dtype": "string"}, {"name": "definition", "dtype": "string"}, {"name": "sentence", "dtype": "string"}, {"name": "option1", "dtype": "string"}, {"name": "option2", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 415190, "num_examples": 1488}, {"name": "val", "num_bytes": 135624, "num_examples": 496}, {"name": "test", "num_bytes": 135191, "num_examples": 496}], "download_size": 249676, "dataset_size": 686005}}
2023-07-13T10:07:34+00:00
c32734b09a788c0039bdfb26b8e5f0e2e2b1f335
JWX1/video_emotion
[ "task_categories:text-classification", "task_categories:zero-shot-classification", "size_categories:1K<n<10K", "language:en", "art", "region:us" ]
2023-06-07T07:49:52+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification", "zero-shot-classification"], "pretty_name": "video_emotion", "tags": ["art"]}
2023-06-07T07:51:20+00:00
78b9075f2b2bb7e6679e11bce9ff7ed14517141a
EasyTerms/Manuel_dataset
[ "task_categories:summarization", "size_categories:n<1K", "language:en", "license:apache-2.0", "legal", "region:us" ]
2023-06-07T07:50:06+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["summarization"], "pretty_name": "lilo", "tags": ["legal"]}
2023-11-02T14:03:24+00:00
b0b4a19860eea1740f04a7836612ec239e08e838
tasksource/miqa
[ "license:cc-by-4.0", "region:us" ]
2023-06-07T07:53:11+00:00
{"license": "cc-by-4.0"}
2023-06-07T07:53:26+00:00
c5c8aa713aeecaf28ce0805d09db69310c91b641
# Vision-CAIR cc_sbu_align in multilang These are Google-translated versions of [Vision-CAIR/cc_sbu_align](https://huggingface.co/datasets/Vision-CAIR/cc_sbu_align). Please visit [2. Second finetuning stage](https://huggingface.co/datasets/Vision-CAIR/cc_sbu_align#training) to understand how the English one was created. Here I provide the `filter_cap.json` of each language's folder. Current languages: - en - vi More languages will follow if I have time.
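A minimal sketch for reading one language's captions; the per-language folder layout and the `annotations` key follow the upstream cc_sbu_align format and should be treated as assumptions.

```python
import json
from huggingface_hub import hf_hub_download

# Assumed layout: one folder per language (e.g. "en", "vi"), each holding
# a filter_cap.json in the upstream cc_sbu_align format.
lang = "vi"
path = hf_hub_download(
    repo_id="dinhanhx/cc_sbu_align_multilang",
    filename=f"{lang}/filter_cap.json",
    repo_type="dataset",
)

with open(path, encoding="utf-8") as f:
    captions = json.load(f)

# The upstream file keys its captions under "annotations"; adjust if this differs.
print(len(captions.get("annotations", [])))
```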
dinhanhx/cc_sbu_align_multilang
[ "task_categories:image-to-text", "task_ids:image-captioning", "language:vi", "language:en", "license:unknown", "cc_sbu_align_multilang", "region:us" ]
2023-06-07T08:07:59+00:00
{"language": ["vi", "en"], "license": "unknown", "task_categories": ["image-to-text"], "task_ids": ["image-captioning"], "pretty_name": "Vision-CAIR cc_sbu_align in multilang", "source-datasets": ["google conceptual captions", "SBU Captioned Photo Dataset", "Vision-CAIR"], "tags": ["cc_sbu_align_multilang"]}
2023-06-07T08:25:20+00:00
1711a20bd4b3abc61ac517448d9f0192d0ef83fb
jackyshen/dazucarve
[ "license:openrail", "region:us" ]
2023-06-07T08:11:32+00:00
{"license": "openrail"}
2023-06-07T08:12:21+00:00
21547a9a954dd94aedbfc5c1bf45d580a897dd94
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
Superlang/element_data_set
[ "task_categories:image-to-image", "size_categories:n<1K", "language:en", "language:zh", "license:cc-by-nc-4.0", "region:us" ]
2023-06-07T08:57:41+00:00
{"language": ["en", "zh"], "license": "cc-by-nc-4.0", "size_categories": ["n<1K"], "task_categories": ["image-to-image"]}
2023-06-07T09:39:48+00:00
edd253352cd0381bfe02fda2f9dc8adc7aa0ad1b
English Wikipedia page headings in the following format: columns: - upwards: the path from the current node towards the root of the headings tree - downwards: the direct children (no deeper than one layer) of the current node
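A minimal loading sketch; the `train` split name is an assumption, and the two printed columns are the ones described above.

```python
from datasets import load_dataset

# A minimal sketch; the "train" split name is an assumption.
ds = load_dataset("umarzein/wikipedia-headings-20k", split="train")

row = ds[0]
# "upwards": path from the current node toward the root of the headings tree
# "downwards": the node's direct (depth-1) children
print(row["upwards"])
print(row["downwards"])
```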
umarzein/wikipedia-headings-20k
[ "size_categories:10K<n<100K", "license:cc-by-3.0", "region:us" ]
2023-06-07T09:04:06+00:00
{"license": "cc-by-3.0", "size_categories": ["10K<n<100K"], "pretty_name": "Wikipedia Headings"}
2023-06-07T12:20:01+00:00
4b36f04f270113318a0da5b5b5390a339481c117
# Dataset Card for "dummy_en_asr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
omar47/dummy_en_asr
[ "region:us" ]
2023-06-07T09:14:14+00:00
{"dataset_info": {"features": [{"name": "path", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13315953.0, "num_examples": 60}, {"name": "validation", "num_bytes": 3749618.0, "num_examples": 40}, {"name": "test", "num_bytes": 5333789.0, "num_examples": 40}], "download_size": 21477003, "dataset_size": 22399360.0}}
2023-06-07T09:14:19+00:00
de3498407e843a5030c1f7ee0a6697e2b5ccb100
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
davanstrien/on_the_books
[ "language:en", "license:cc-by-3.0", "lam", "region:us" ]
2023-06-07T09:17:22+00:00
{"language": ["en"], "license": "cc-by-3.0", "pretty_name": "On the Books", "tags": ["lam"]}
2023-06-07T09:19:59+00:00
226f75ab5e6ca68720bfb63f6e046919b2df5413
# Dataset Card for "slack-data-long-responses" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Samhita/slack-data-long-responses
[ "region:us" ]
2023-06-07T09:38:46+00:00
{"dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "input", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14456532, "num_examples": 24503}], "download_size": 7626216, "dataset_size": 14456532}}
2023-06-07T09:38:48+00:00
4083676f5d3c9c634c614f3dbe4581b3be4bb00b
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
sloppysid/faiss_search
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "region:us" ]
2023-06-07T10:20:31+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "pretty_name": "Faiss index for pdf search"}
2023-06-12T12:07:59+00:00
becd876f32492ec522feffe4ee8ef585afec46b6
## Dataset Summary The Arithmetic Operations Dataset is a synthetically generated collection of mathematical arithmetic operations for practice and evaluation purposes. It contains a total of 624,800 arithmetic operations, consisting of 568,000 addition operations and 56,800 subtraction operations. The dataset is designed to provide a range of problems for training and evaluating language models on simple arithmetic (mostly addition; other operations TBA). ## Dataset Structure The dataset is organized into two main categories: addition and subtraction. Each category's operations are stored in a separate file (`addition.json` and `subtraction.json`), and `dataset.json` provides the combined data from both. ### Data Instances ```json { "instruction": "What is the answer to 373486002216116154 + 339369?", "input": "373486002216116154 + 339369", "output": "373486002216116154 + 339369 = 373486002216455523", "answer": "373486002216455523" }, { "instruction": "9916607491627649 minus 581954", "input": "9916607491627649 - 581954", "output": "9916607491627649 - 581954 = 9916607491045695", "answer": "9916607491045695" }, ``` ### Data Fields The files share the same structure and have 4 fields: - `instruction`: Human instructions are generated by inserting arithmetic expressions into randomly selected templates and incorporating natural language variations. These instructions are intended to serve as prompts for instruction-finetuning, providing input for training the model. - `input`: A randomly generated arithmetic expression that can serve as a substitute for the `instruction` component during training, allowing a specific focus on arithmetic operations while minimizing the impact of natural language. - `output`: the target output for the model to learn. - `answer`: the direct numerical answer to the arithmetic task. It can be used to test the learnability of various sub-tasks. ## Contact For any questions or inquiries regarding this dataset, please contact [email protected].
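As a sanity check, here is a minimal sketch that re-computes an answer from the `input` field; it assumes `dataset.json` is a JSON list of records sitting in the repository root.

```python
import json

# Assumes dataset.json (a JSON list of records) sits in the repository root.
with open("dataset.json", encoding="utf-8") as f:
    records = json.load(f)

sample = records[0]
# "input" is always of the form "a + b" or "a - b", per the instances above.
left, op, right = sample["input"].split()
computed = int(left) + int(right) if op == "+" else int(left) - int(right)
# "answer" is stored as a string, so compare string representations.
assert str(computed) == sample["answer"]
```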
xufana/RedPajama-INCITE-Instruct-3B-Addition
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "license:apache-2.0", "arithmetics", "region:us" ]
2023-06-07T10:23:03+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "RedPajama Arithmetics", "tags": ["arithmetics"]}
2023-06-07T11:10:11+00:00
fd686a2ecb166276efebc17cd4a031aa12f48064
Repository
sit1/sd-data
[ "region:us" ]
2023-06-07T10:27:00+00:00
{}
2023-06-07T10:28:48+00:00
2e62a503ac6bde9271348a5ca5e743a4a8a55541
## Guidelines These guidelines are based on the paper [Training Language Models to Follow Instructions with Human Feedback](https://cdn.openai.com/papers/Training_language_models_to_follow_instructions_with_human_feedback.pdf) **You are given a text-based description of a task, submitted by a user.** This task description may be in the form of an explicit instruction (e.g. "Write a story about a wise frog."). The task may also be specified indirectly, for example by using several examples of the desired behavior (e.g. given a sequence of movie reviews followed by their sentiment, followed by one movie review without a sentiment, you can assume the task is to predict the sentiment of the final review), or by generating the start of a desired output (e.g. given "There once was a wise frog named Julius," you can assume the task is to continue the story). **You will also be given several text outputs, intended to help the user with their task.** Your job is to evaluate these outputs to ensure that they are helpful, truthful, and harmless, and select the best according to these criteria. If **none of the outputs is correct or conforms to the criteria, or the outputs are exactly equal, use the discard option instead of ranking them**. For most tasks, being truthful and harmless is more important than being helpful. By **helpful**, we mean that the output should follow the user's intention and help the user solve their task. Some examples of helpful behavior: - Writing in clear language. - Answering the question they meant to ask, even if they mis-asked it. - Being sensitive to internationality (e.g., "football" shouldn't mean American football, and "the president" doesn't necessarily mean the US president). - If an instruction is too confusing, asking for clarification and explaining why the instruction is confusing. - Not giving overly long or rambling answers, or repeating information from the question. - Not assuming extraneous context outside of what's given (besides things like facts about the world), unless that's an implied part of the task. For example, if asked to "write a polite email response to this email: email body," the output shouldn't assume "I can't make it this time, but am free next weekend." But if asked to "Write an email to Socrates," then you can safely assume the output can make up a context. By **truthful**, we mean that the output contains accurate information and doesn't mislead the user. Some examples of truthful behavior: - On tasks like summarization, using only information from the input and not making up details that are not part of the input description. - Not producing clearly false information about the world (e.g., making up facts or promoting conspiracies). For example, the output should not state that Hillary Clinton has served time in prison. - Avoiding misleading information or information with questionable authenticity. For example, if the input asks "Why did Hillary Clinton go to jail?", the output should not say "It's not totally clear," but rather should refute the premise of the question. By **harmless**, we mean that the output should not cause physical, psychological, or social harm to people; damage to or loss of equipment or property; damage to the environment; or harm to institutions or resources necessary to human well-being. Some examples of harmless behavior: - Treating other humans with kindness, respect, and consideration; not denigrating members of certain groups or using biased language against a particular group. 
- Not generating abusive, threatening, or offensive language or promoting violence. - Not writing sexual or violent content if it's not asked for. - Not giving bad real-world advice or promoting illegal activity. Evaluating model outputs may involve making trade-offs between these criteria. These trade-offs will depend on the task. ## Fields * `instruction` is of type <class 'str'> * `response-1` is of type <class 'str'> * `response-2` is of type <class 'str'> ## Questions * `choose-best` : Choose the most helpful, harmless, and truthful response. Select 1 for response-1, 2 for response-2, or discard if both are equally good/bad. ## Load with Argilla To load this dataset with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface('argilla/reward-model-data-falcon') ``` ## Load with Datasets To load this dataset with Datasets, you'll just need to install Datasets as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset('argilla/reward-model-data-falcon') ```
argilla/reward-model-data-falcon
[ "region:us" ]
2023-06-07T10:33:23+00:00
{}
2023-06-07T10:33:30+00:00
d95804b169493b8b41f53cf483b908b83a38086c
# Dataset Card for "stanford_alpaca" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Amirkid/stanford_alpaca
[ "region:us" ]
2023-06-07T10:59:34+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 73322820, "num_examples": 104004}], "download_size": 518089, "dataset_size": 73322820}}
2023-06-07T10:59:36+00:00
c4657e08faa6981be37aaf08429d49b90d758051
AgentWaller/openassistant-guanaco-en-translated
[ "license:apache-2.0", "region:us" ]
2023-06-07T11:12:59+00:00
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "message_id", "dtype": "string"}, {"name": "parent_id", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "created_date", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "role", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "review_count", "dtype": "int64"}, {"name": "review_result", "dtype": "bool"}, {"name": "deleted", "dtype": "bool"}, {"name": "rank", "dtype": "int64"}, {"name": "synthetic", "dtype": "bool"}, {"name": "model_name", "dtype": "null"}, {"name": "detoxify", "struct": [{"name": "identity_attack", "dtype": "float64"}, {"name": "insult", "dtype": "float64"}, {"name": "obscene", "dtype": "float64"}, {"name": "severe_toxicity", "dtype": "float64"}, {"name": "sexual_explicit", "dtype": "float64"}, {"name": "threat", "dtype": "float64"}, {"name": "toxicity", "dtype": "float64"}]}, {"name": "message_tree_id", "dtype": "string"}, {"name": "tree_state", "dtype": "string"}, {"name": "emojis", "struct": [{"name": "count", "sequence": "int64"}, {"name": "name", "sequence": "string"}]}, {"name": "labels", "struct": [{"name": "count", "sequence": "int64"}, {"name": "name", "sequence": "string"}, {"name": "value", "sequence": "float64"}]}], "splits": [{"name": "train", "num_bytes": 32780237, "num_examples": 29329}, {"name": "validation", "num_bytes": 1724911, "num_examples": 1536}], "download_size": 13607387, "dataset_size": 34505148}}
2023-06-08T09:44:15+00:00
d22761838261d5da4a559d50cc01fe1c00a1b3f0
# Dataset Card for SemEval 2016 Aspect Based Sentiment Analysis on Hotel Reviews ## Dataset Description - **Repository:** https://github.com/msmadi/ABSA-Hotels/tree/master ### Dataset Summary An aspect-based sentiment analysis dataset of hotel reviews in Arabic. ### Languages Arabic ### Licensing Information The original dataset was licensed under MIT, so this one is also under MIT. ### Citation Information Cite this dataset and the original authors if you wish.
eastwind/semeval-2016-absa-reviews-arabic
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:ar", "license:mit", "region:us" ]
2023-06-07T11:22:40+00:00
{"language": ["ar"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "SemEval 2016 Aspect Based Sentiment Analysis on Hotel Reviews"}
2023-06-07T12:09:16+00:00
47f70a0d3c0d4352a060747fb394cb8c5ae64121
# Dataset Card for "pg-ko-tknizer-en_code" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hac541309/pg-ko-tknizer-en_code
[ "language:en", "region:us" ]
2023-06-07T11:48:08+00:00
{"language": "en", "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8602140384, "num_examples": 1300000}], "download_size": 4022436417, "dataset_size": 8602140384}}
2023-07-14T02:37:19+00:00
24d7ac97e6f77ab7a809e4d866f01bba22bb64e3
Thouph/formatted
[ "license:wtfpl", "region:us" ]
2023-06-07T12:02:42+00:00
{"license": "wtfpl"}
2023-06-07T13:24:58+00:00
ed5856b47980dbfa86e9237c507cd3165ad30542
# Dataset Card for "pytorch-discuss-tutorial-346" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shrinath-suresh/pytorch-discuss-tutorial-346
[ "region:us" ]
2023-06-07T12:10:05+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 646894, "num_examples": 346}], "download_size": 246825, "dataset_size": 646894}}
2023-06-07T12:10:07+00:00
9e830a698f73a3a87a9a5ce82433ae97dbe5c5d9
For context, deserialize the JSON into the Node classes from: https://gist.github.com/UmarZein/4c46bc42323d0f61bd3494dec48f3fa4 The difference between this dataset and https://huggingface.co/datasets/umarzein/wikipedia-headings-20k is that this one is more compact, i.e., the column `rootwards` is distinct, which saves space, but you have to parse the JSON first.
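For illustration only, a stand-in for the gist's Node classes; the real definitions live at the gist above, and the `title`/`children` key names used here are assumptions about the serialized shape.

```python
import json
from dataclasses import dataclass, field

# Illustrative stand-in for the gist's Node classes; treat the
# "title"/"children" keys as assumptions about the serialized shape.
@dataclass
class Node:
    title: str
    children: list = field(default_factory=list)

def parse(obj: dict) -> Node:
    return Node(obj["title"], [parse(c) for c in obj.get("children", [])])

sample = '{"title": "History", "children": [{"title": "Early years", "children": []}]}'
root = parse(json.loads(sample))
print(root.title, [c.title for c in root.children])
```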
umarzein/wikipedia-headings-tree-7k
[ "size_categories:1K<n<10K", "license:cc-by-3.0", "region:us" ]
2023-06-07T12:17:31+00:00
{"license": "cc-by-3.0", "size_categories": ["1K<n<10K"]}
2023-06-08T03:03:13+00:00
e3313e3f13a7b4d32de50668c11e70d7b14e4eec
# Dataset Card for "pixel_glue_mrpc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nadav/pixel_glue_mrpc
[ "region:us" ]
2023-06-07T12:23:13+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}], "splits": [{"name": "train", "num_bytes": 65494891.5, "num_examples": 3668}, {"name": "validation", "num_bytes": 7296339.0, "num_examples": 408}], "download_size": 72091791, "dataset_size": 72791230.5}}
2023-06-08T10:13:45+00:00
ab6591dc0cd82cbbcc8ebded001269e32eb1f4cc
# Dataset Card for "malang2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minyoung9353/malang2
[ "region:us" ]
2023-06-07T12:27:09+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 113455.0, "num_examples": 1}], "download_size": 111832, "dataset_size": 113455.0}}
2023-06-07T12:29:57+00:00
70c923560f0f287d6b8b293e991dbcd281ab0cda
# Dataset Card for "b8d275dd" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/b8d275dd
[ "region:us" ]
2023-06-07T12:36:33+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 180, "num_examples": 10}], "download_size": 1340, "dataset_size": 180}}
2023-06-07T12:36:34+00:00
c1196c02f383c667ea1b5e2968e01372aedbb683
# Dataset Card for "pixel_glue_cola" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nadav/pixel_glue_cola
[ "region:us" ]
2023-06-07T12:38:16+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}], "splits": [{"name": "train", "num_bytes": 50427044.125, "num_examples": 8551}, {"name": "validation", "num_bytes": 6217509.625, "num_examples": 1043}], "download_size": 46779514, "dataset_size": 56644553.75}}
2023-06-08T06:25:47+00:00
5e686490ad6c042fe711ee0a6de362aacc93394f
# Dataset Card for "d7f07bf2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/d7f07bf2
[ "region:us" ]
2023-06-07T12:41:46+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 182, "num_examples": 10}], "download_size": 1330, "dataset_size": 182}}
2023-06-07T12:41:47+00:00
914c7ce26fe2f785ad65c4413c1af5c7150ac3d8
# Dataset Card for "comparison-data-falcon" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/comparison-data-falcon
[ "region:us" ]
2023-06-07T12:53:19+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string", "id": "field"}, {"name": "response-1", "dtype": "string", "id": "field"}, {"name": "response-2", "dtype": "string", "id": "field"}, {"name": "choose-best", "sequence": [{"name": "user_id", "dtype": "string"}, {"name": "value", "dtype": "int32"}, {"name": "status", "dtype": "string"}], "id": "question"}, {"name": "external_id", "dtype": "string", "id": "external_id"}], "splits": [{"name": "train", "num_bytes": 124852, "num_examples": 100}], "download_size": 87155, "dataset_size": 124852}}
2023-06-07T12:53:24+00:00
9df4197de850403796ed66dc7f5860f839f6caed
# Dataset Card for "drivable_area_segmentation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
samuelsze/drivable_area_segmentation
[ "region:us" ]
2023-06-07T12:53:22+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 55242426.169, "num_examples": 21649}, {"name": "validation", "num_bytes": 11151638.396, "num_examples": 4489}], "download_size": 41107356, "dataset_size": 66394064.565}}
2023-06-07T23:19:57+00:00
131790acd971e3b2dd9d6ca8fc5c3e0e2f775d91
# Dataset Card for "comparison-data-falcon-with-feedback" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/comparison-data-falcon-with-feedback
[ "region:us" ]
2023-06-07T12:53:51+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string", "id": "field"}, {"name": "response-1", "dtype": "string", "id": "field"}, {"name": "response-2", "dtype": "string", "id": "field"}, {"name": "choose-best", "sequence": [{"name": "user_id", "dtype": "string"}, {"name": "value", "dtype": "int32"}, {"name": "status", "dtype": "string"}], "id": "question"}, {"name": "external_id", "dtype": "string", "id": "external_id"}], "splits": [{"name": "train", "num_bytes": 125961, "num_examples": 100}], "download_size": 87836, "dataset_size": 125961}}
2023-06-07T12:53:55+00:00
16f89769dca42a8202f0f88ef6c5e5375020b12e
# Dataset Card for comparison-data-falcon-with-feedback This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.cfg`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("argilla/comparison-data-falcon-with-feedback") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("argilla/comparison-data-falcon-with-feedback") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/guides/llms/conceptual_guides/data_model.html), so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, and **guidelines**. The **fields** are the dataset records themselves; for the moment, only text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | instruction | User instruction | TextField | True | False | | response-1 | Response-1 | TextField | True | False | | response-2 | Response-2 | TextField | True | False | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, single choice, or multiple choice. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | choose-best | Choose the best response: | RatingQuestion | True | Choose the most helpful, harmless, and truthful response. Select 1 for response-1, 2 for response-2, or discard if both are equally good/bad. | [1, 2] | Finally, the **guidelines** are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": "518", "fields": { "instruction": "What is Depreciation", "response-1": "Depreciation is the drop in value of an asset due to wear and tear, age and obsolescence (going out of date) as recorded in an organization\u0027s financial records.", "response-2": "What is Depreciation \u2013 10 Important Facts to Know?\nWhen a business buys a new asset, the purchase price of that asset is depreciated over time to reflect its usage and eventual obsolescence. Depreciation expense can be a tax deductible expense and is usually a non-cash expense reported on a company\u2019s income statement and balance sheet. The amount of depreciation expense a company reports each year is the difference between the original purchase price of the asset and what the current value of that asset might be. Here are 10 important facts to know about depreciation:\n1. Depreciation is a non-cash expense. It is an expense that is reported in a business\u2019s income statement and balance sheet and not a cash flow expense.\n2. Depreciation is an accounting standard and it is required to be disclosed in a business\u2019s financial statements.\n3. The amount of depreciation is usually a tax expense and not a cash expense reported on a company\u2019s income statement" }, "responses": [ { "status": "submitted", "user_id": null, "values": { "choose-best": { "value": 1 } } } ] } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "choose-best": { "status": [ "submitted" ], "user_id": [ null ], "value": [ 1 ] }, "external_id": "518", "instruction": "What is Depreciation", "response-1": "Depreciation is the drop in value of an asset due to wear and tear, age and obsolescence (going out of date) as recorded in an organization\u0027s financial records.", "response-2": "What is Depreciation \u2013 10 Important Facts to Know?\nWhen a business buys a new asset, the purchase price of that asset is depreciated over time to reflect its usage and eventual obsolescence. Depreciation expense can be a tax deductible expense and is usually a non-cash expense reported on a company\u2019s income statement and balance sheet. The amount of depreciation expense a company reports each year is the difference between the original purchase price of the asset and what the current value of that asset might be. Here are 10 important facts to know about depreciation:\n1. Depreciation is a non-cash expense. It is an expense that is reported in a business\u2019s income statement and balance sheet and not a cash flow expense.\n2. Depreciation is an accounting standard and it is required to be disclosed in a business\u2019s financial statements.\n3. The amount of depreciation is usually a tax expense and not a cash expense reported on a company\u2019s income statement" } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves; for the moment, only text fields are supported. These are the ones that will be used to provide responses to the questions. * **instruction** is of type `TextField`. * **response-1** is of type `TextField`. * **response-2** is of type `TextField`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as rating, text, single choice, or multiple choice. 
* **choose-best** is of type `RatingQuestion` with the following allowed values [1, 2], and description "Choose the most helpful, harmless, and truthful response. Select 1 for response-1, 2 for response-2, or discard if both are equally good/bad.". Additionally, there is one more optional field: * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines These guidelines are based on the paper [Training Language Models to Follow Instructions with Human Feedback](https://cdn.openai.com/papers/Training_language_models_to_follow_instructions_with_human_feedback.pdf) **You are given a text-based description of a task, submitted by a user.** This task description may be in the form of an explicit instruction (e.g. "Write a story about a wise frog."). The task may also be specified indirectly, for example by using several examples of the desired behavior (e.g. given a sequence of movie reviews followed by their sentiment, followed by one movie review without a sentiment, you can assume the task is to predict the sentiment of the final review), or by generating the start of a desired output (e.g. given "There once was a wise frog named Julius," you can assume the task is to continue the story). **You will also be given several text outputs, intended to help the user with their task.** Your job is to evaluate these outputs to ensure that they are helpful, truthful, and harmless, and select the best according to these criteria. If **none of the outputs is correct or conforms to the criteria, or the outputs are exactly equal, use the discard option instead of ranking them**. For most tasks, being truthful and harmless is more important than being helpful. By **helpful**, we mean that the output should follow the user's intention and help the user solve their task. Some examples of helpful behavior: - Writing in clear language. - Answering the question they meant to ask, even if they mis-asked it. - Being sensitive to internationality (e.g., "football" shouldn't mean American football, and "the president" doesn't necessarily mean the US president). - If an instruction is too confusing, asking for clarification and explaining why the instruction is confusing. - Not giving overly long or rambling answers, or repeating information from the question. - Not assuming extraneous context outside of what's given (besides things like facts about the world), unless that's an implied part of the task. For example, if asked to "write a polite email response to this email: email body," the output shouldn't assume "I can't make it this time, but am free next weekend." But if asked to "Write an email to Socrates," then you can safely assume the output can make up a context. By **truthful**, we mean that the output contains accurate information and doesn't mislead the user. Some examples of truthful behavior: - On tasks like summarization, using only information from the input and not making up details that are not part of the input description. 
- Not producing clearly false information about the world (e.g., making up facts or promoting conspiracies). For example, the output should not state that Hillary Clinton has served time in prison. - Avoiding misleading information or information with questionable authenticity. For example, if the input asks "Why did Hillary Clinton go to jail?", the output should not say "It's not totally clear," but rather should refute the premise of the question. By **harmless**, we mean that the output should not cause physical, psychological, or social harm to people; damage to or loss of equipment or property; damage to the environment; or harm to institutions or resources necessary to human well-being. Some examples of harmless behavior: - Treating other humans with kindness, respect, and consideration; not denigrating members of certain groups or using biased language against a particular group. - Not generating abusive, threatening, or offensive language or promoting violence. - Not writing sexual or violent content if it's not asked for. - Not giving bad real-world advice or promoting illegal activity. Evaluating model outputs may involve making trade-offs between these criteria. These trade-offs will depend on the task. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
argilla/comparison-data-falcon-with-feedback
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-06-07T12:54:15+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2023-06-07T13:38:44+00:00
1bc50f870c053c6a94993cab2da5d640d8fe0b6c
# Dataset Card for "eef0e7be" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/eef0e7be
[ "region:us" ]
2023-06-07T12:54:46+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 182, "num_examples": 10}], "download_size": 1341, "dataset_size": 182}}
2023-06-07T12:54:47+00:00
3e58f6ceb3e103f3e06627eabd491a6374950bdc
# Dataset Card for "a74d7c24" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/a74d7c24
[ "region:us" ]
2023-06-07T12:58:04+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 180, "num_examples": 10}], "download_size": 1339, "dataset_size": 180}}
2023-06-07T12:58:04+00:00
22d3690a26689976f2b50733fb1e217af8547067
# Dataset Card for "pytorch-discuss-tutorial-1000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shrinath-suresh/pytorch-discuss-tutorial-1000
[ "region:us" ]
2023-06-07T12:58:42+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2714553, "num_examples": 1000}], "download_size": 1230547, "dataset_size": 2714553}}
2023-06-07T12:58:44+00:00
14a3c3715198d43df0ed7288164119ed34d5f7bf
# Reasonix Reasonix is a collection of 11,000 question-answer and choice-with-answer pairs gathered from different datasets.
Fredithefish/Reasonix
[ "task_categories:question-answering", "task_categories:text-generation", "language:en", "license:apache-2.0", "region:us" ]
2023-06-07T13:00:46+00:00
{"language": ["en"], "license": "apache-2.0", "task_categories": ["question-answering", "text-generation"]}
2023-06-07T16:37:07+00:00
b0ffe95f774bbae6fb17b127793dceeb1926d001
# Dataset Card for "47439812" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/47439812
[ "region:us" ]
2023-06-07T13:07:38+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 182, "num_examples": 10}], "download_size": 1340, "dataset_size": 182}}
2023-06-07T13:07:39+00:00
c84bfdd99d457e18dc0c87c83a7e89a1ca52ceab
tobiasvilliger/147
[ "license:apache-2.0", "region:us" ]
2023-06-07T13:11:26+00:00
{"license": "apache-2.0"}
2023-06-07T13:11:26+00:00