Columns: sha (string, 40 chars); text (string, 0–13.4M chars); id (string, 2–117 chars); tags (list); created_at (string, 25 chars); metadata (string, 2–31.7M chars); last_modified (string, 25 chars)
bb0218238f3bbf87889870b52227e2f7a0f69c69
# Dataset Card for "restaurant_order_local_test_colab" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
figfig/restaurant_order_local_test_colab
[ "region:us" ]
2023-02-11T05:12:53+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 407597.0, "num_examples": 3}, {"name": "test", "num_bytes": 407597.0, "num_examples": 3}], "download_size": 818170, "dataset_size": 815194.0}}
2023-02-11T05:13:36+00:00
6b996a4f9455cf7960b1c89cf3c05b80791c727b
# Dataset Card for "RuDevSberDS" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MegaKosT/RuDevSberDS
[ "region:us" ]
2023-02-11T06:27:49+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5069939576.368, "num_examples": 43576}, {"name": "test", "num_bytes": 1096369193.45, "num_examples": 9225}, {"name": "validation", "num_bytes": 2231862564.3, "num_examples": 18450}], "download_size": 8198117805, "dataset_size": 8398171334.118}}
2023-02-11T06:50:40+00:00
5ad70f98bd30e4c70283c11f03b46b4f47729d16
# AutoTrain Dataset for project: flan-xl-conversation ## Dataset Description This dataset has been automatically processed by AutoTrain for project flan-xl-conversation. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "What is the largest insect in the world?", "target": "The largest insect in the world is the Goliath Beetle." }, { "text": "What is the largest amphibian in the world?", "target": "The largest amphibian in the world is the Chinese giant salamander." } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 158 | | valid | 40 |
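A minimal loading sketch with the `datasets` library (repo id from this page; split names as in the table above):

```python
from datasets import load_dataset

ds = load_dataset("Jonnylaw/autotrain-data-flan-xl-conversation")
sample = ds["train"][0]
print(sample["text"])    # question
print(sample["target"])  # answer
```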
Jonnylaw/autotrain-data-flan-xl-conversation
[ "task_categories:summarization", "region:us" ]
2023-02-11T07:07:13+00:00
{"task_categories": ["summarization"]}
2023-02-11T07:07:39+00:00
04be7aaff8d903a559f7248b0363e4b33a76b90c
# AutoTrain Dataset for project: flan-large-conv ## Dataset Description This dataset has been automatically processed by AutoTrain for project flan-large-conv. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "What is the largest insect in the world?", "target": "The largest insect in the world is the Goliath Beetle." }, { "text": "What is the largest amphibian in the world?", "target": "The largest amphibian in the world is the Chinese giant salamander." } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 158 | | valid | 40 |
Jonnylaw/chat-conversation-trainer
[ "task_categories:summarization", "region:us" ]
2023-02-11T07:19:28+00:00
{"task_categories": ["summarization"]}
2023-02-11T07:19:50+00:00
19cd5192aef3a4568775ebedf7e1f2f2f2b5c939
# Dataset Card for "Fin_Corpus_EarningCall" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
FINDA-FIT/Fin_Corpus_EarningCall
[ "region:us" ]
2023-02-11T07:31:37+00:00
{"dataset_info": {"features": [{"name": "ID", "dtype": "string"}, {"name": "CONTEXT", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8724423352, "num_examples": 234119}], "download_size": 4615593313, "dataset_size": 8724423352}}
2023-02-14T04:58:13+00:00
d18c42bb45965f6ce0a6402ec26551a8fa5a60e3
# EASY - Ensemble Augmented-Shot Y-shaped Learning: State-Of-The-Art Few-Shot Classification with Simple Ingredients. This repository contains the data of the second version of the paper [EASY - Ensemble Augmented-Shot Y-shaped Learning: State-Of-The-Art Few-Shot Classification with Simple Ingredients](https://arxiv.org/pdf/2201.09699.pdf).
ybendou/easy-v2
[ "license:apache-2.0", "arxiv:2201.09699", "region:us" ]
2023-02-11T07:43:49+00:00
{"license": "apache-2.0"}
2023-02-11T19:57:55+00:00
78341365858aa84623265c276997f93339021c71
# Dataset Card for "deprem_satellite_semantic_whu_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
deprem-ml/deprem_satellite_semantic_whu_dataset
[ "region:us" ]
2023-02-11T10:34:24+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3110422171.552, "num_examples": 4736}, {"name": "validation", "num_bytes": 659257774.42, "num_examples": 1036}, {"name": "test", "num_bytes": 307212582.0, "num_examples": 500}], "download_size": 4016000111, "dataset_size": 4076892527.972}}
2023-02-11T10:49:51+00:00
77d2b9f9f5d596ab0316996e4c1eccbc0dd053cc
# Dataset Card for "tiktok-people" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yaeldekel/tiktok-people
[ "region:us" ]
2023-02-11T10:55:36+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1093773.0, "num_examples": 3}], "download_size": 1095552, "dataset_size": 1093773.0}}
2023-02-11T10:55:45+00:00
565898c27f119fbe8b302525fb914c1b7544f403
mihirinamdar/finqa
[ "license:mit", "region:us" ]
2023-02-11T11:13:31+00:00
{"license": "mit"}
2023-02-14T17:30:30+00:00
e571779bd04d3a09113d8cc6f4206912dec2cbcc
# Dataset Card for "wikipedia_stage2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MartinKu/wikipedia_stage2
[ "region:us" ]
2023-02-11T14:38:47+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "S_V", "sequence": "string"}, {"name": "S_V_position", "sequence": "int64"}, {"name": "O_C", "sequence": "string"}, {"name": "O_C_position", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 45092871426, "num_examples": 6458670}], "download_size": 25091808148, "dataset_size": 45092871426}}
2023-02-14T13:43:57+00:00
a622fdab5491246125756a2c39cab72fb50caf03
# Dataset Card for Dataset kotlin_code ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains Kotlin functions with their documentation. It can be useful for fine-tuning existing models or training new ones that generate code documentation. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
codkiller0911/kotlin_code
[ "size_categories:1K<n<10K", "language:en", "kotlin", "android", "region:us" ]
2023-02-11T14:39:47+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "tags": ["kotlin", "android"]}
2023-02-11T16:42:21+00:00
6755eda595f7d32fc815c8107dee27c733c866a7
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The dataset contains pairs of sentences with a next_sentence_label for next sentence prediction (NSP). Sentences were taken from a public dataset of Jira projects. The next sentence is always either the following sentence within the same comment or a sentence from a reply to that comment. ### Supported Tasks and Leaderboards NSP, MLM ### Languages English ## Dataset Structure sentence_a, sentence_b, next_sentence_label ### Source Data https://zenodo.org/record/5901804#.Y_Xv4HZBxD9
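As a sketch of how these fields plug into BERT-style NSP training (field names come from the structure above; the split name and tokenizer checkpoint are assumptions):

```python
from datasets import load_dataset
from transformers import AutoTokenizer

ds = load_dataset("pheepa/jira-comments-nsp")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint

def encode_pair(example):
    # sentence_a and sentence_b become BERT's two segments. Note that
    # transformers' BertForPreTraining expects next_sentence_label 0 for
    # "B follows A" and 1 for "B is random"; verify the dataset uses the
    # same convention before training.
    enc = tokenizer(example["sentence_a"], example["sentence_b"], truncation=True)
    enc["next_sentence_label"] = example["next_sentence_label"]
    return enc

encoded = ds.map(encode_pair)
```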
pheepa/jira-comments-nsp
[ "task_categories:text-generation", "size_categories:1M<n<10M", "language:en", "jira", "region:us" ]
2023-02-11T14:43:28+00:00
{"language": ["en"], "size_categories": ["1M<n<10M"], "task_categories": ["text-generation"], "pretty_name": "jira-comments", "tags": ["jira"]}
2023-02-22T10:37:20+00:00
b1296aef91bb33dc580c8582da4cf3cad4004b07
# Lora - hanboka-000003 ## Dataset Description - **Source:** [Hanbok LoRA](https://arca.live/b/aiart/69417775) A hanbok LoRA file. Use hanbok, korean clothes in the prompt. Recommended weight: 0.8. [Download](https://huggingface.co/datasets/AIARTCHAN/lora-hanboka-000003/resolve/main/hanboka-000003.safetensors)
AIARTCHAN/lora-hanboka-000003
[ "license:creativeml-openrail-m", "lora", "aiartchan", "stable-diffusion", "region:us" ]
2023-02-11T15:55:24+00:00
{"license": "creativeml-openrail-m", "tags": ["lora", "aiartchan", "stable-diffusion"]}
2023-02-11T16:02:16+00:00
33765d9f7f731aa69fa795c803718db10be4e0fc
# Lora - Hanbok_LoRA_V2 ## Dataset Description - **Source:** [Remade the hanbok LoRA](https://arca.live/b/aiart/69505242) A hanbok LoRA file. Example prompt:
```
(masterpiece, best quality:1.2), 1girl, solo, <lora:Hanbok_LoRA_V2:0.8>, hanbok, korean_clothes, smile, best ratio four finger and one thumb, looking_at_viewer, cowboy_shot, (white background), (simple background)
Negative prompt: EasyNegative, extra fingers,fewer fingers, watermark, text, animal_ears, japanese_clothes, kimono, chinese_clothes
```
```
Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 10, Size: 640x960, Model: Counterfeit-V2.5
Denoising strength: 0.6, Clip skip: 2, Hires upscale: 1.5, Hires upscaler: Latent
```
Recommended weight: 0.8. [Download](https://huggingface.co/datasets/AIARTCHAN/lora-Hanbok_LoRA_V2/resolve/main/Hanbok_LoRA_V2.safetensors)
AIARTCHAN/lora-Hanbok_LoRA_V2
[ "license:creativeml-openrail-m", "lora", "aiartchan", "stable-diffusion", "region:us" ]
2023-02-11T16:13:21+00:00
{"license": "creativeml-openrail-m", "tags": ["lora", "aiartchan", "stable-diffusion"]}
2023-02-11T16:15:47+00:00
f744a6ae04b17e94d5788ed380177a03f44043c8
# Dataset Card for "restaurant_order_HSR_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
figfig/restaurant_order_HSR_test
[ "region:us" ]
2023-02-11T17:29:22+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 267265.0, "num_examples": 6}, {"name": "test", "num_bytes": 267265.0, "num_examples": 6}], "download_size": 537202, "dataset_size": 534530.0}}
2023-02-11T19:45:43+00:00
b76521e6f581c3a3dfa482cb4476cc708e9df34f
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) ## Dataset Description - **Homepage:** https://huggingface.co/datasets/RayhanADev/replit-comments-categorized - **Repository:** https://huggingface.co/datasets/RayhanADev/replit-comments-categorized - **Point of Contact:** [email protected] ### Dataset Summary Comments from [Replit](https://replit.com/)'s Community, sourced via moderator GraphQL queries and personally labeled :). For use in the Replit + Weights and Biases Hackathon. ### Supported Tasks and Leaderboards Text Classification ### Languages English ## Dataset Structure ### Data Instances ```json {"label":3,"text":"@KENDALPETERSON\nShut up you dont have a permit to brag."} ``` Labels - 0: General - 1: Spam - 2: NSFW - 3: Harassment ### Data Fields Label, Text ### Data Splits Train, Validation, Test ## Dataset Creation ### Curation Rationale Fine-tuning data for the Replit + Weights and Biases Hackathon. ### Source Data #### Initial Data Collection and Normalization This data was collected via Replit's GraphQL API using a query only available to site moderators and admins, allowing for querying comments by pattern. By not setting a pattern, one could retrieve up to 50 comments within a given time span. Data was sourced from February 1st, 2023 to February 10th, 2023. #### Who are the source language producers? Replit Community users ### Annotations #### Annotation process I tagged the data using a small website that I made to assign labels to individual comments. ![](https://media.furret.dev/JKnDLTKMqF) #### Who are the annotators? Me ([@rayhanadev](https://www.furret.dev)) ### Personal and Sensitive Information This dataset is entirely unfiltered and may contain personal and sensitive information. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases The data is labeled with three categories of content that are not allowed on Replit, plus a fourth "general" category for acceptable content. ### Other Known Limitations The dataset is small (n<1k), but I am working on increasing the amount of available data. There is also significantly more unacceptable content labelled than there is acceptable ("general") content. ## Additional Information ### Dataset Curators Me ([@rayhanadev](https://www.furret.dev)) ### Licensing Information This data is licensed under MIT, but Replit or any concerned parties are free to issue a takedown of this dataset. Any person whose data is included in this dataset is free to request that it be deleted; however, making this data available is not a breach of Replit's data use policy.
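A minimal loading sketch (the `train` split name is an assumption; the label mapping follows the list above):

```python
from datasets import load_dataset

# Label ids as documented above.
LABELS = {0: "General", 1: "Spam", 2: "NSFW", 3: "Harassment"}

ds = load_dataset("RayhanADev/replit-comments-categorized")
example = ds["train"][0]
print(LABELS[example["label"]], "->", example["text"])
```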
RayhanADev/replit-comments-categorized
[ "task_categories:text-classification", "task_ids:intent-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:en", "license:mit", "replit", "comments", "forum", "chat", "intent", "classification", "doi:10.57967/hf/0351", "region:us" ]
2023-02-11T18:25:37+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["intent-classification"], "pretty_name": "Replit Comments Categorized", "tags": ["replit", "comments", "forum", "chat", "intent", "classification"]}
2023-02-11T22:17:51+00:00
30094805ea29d75b542d6c3f0288e4560a9fa13a
# Dataset Card for "mc4_es_cl" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jorgeortizfuentes/mc4_es_cl
[ "region:us" ]
2023-02-11T18:30:50+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "timestamp", "dtype": "string"}, {"name": "url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33474281363.716232, "num_examples": 8698099}, {"name": "validation", "num_bytes": 33072762.075400714, "num_examples": 8582}], "download_size": 15903346209, "dataset_size": 33507354125.791634}}
2023-02-12T01:10:09+00:00
7b979192cccfe2c28c02c9f9dfccbcc88daaf983
pacovaldez/pandas-questions
[ "license:apache-2.0", "region:us" ]
2023-02-11T18:35:38+00:00
{"license": "apache-2.0"}
2023-03-06T20:16:51+00:00
44069e5a58a02cd0aae1e8af2a625c6d2ebb4f03
# Dataset Card for "MedQA-USMLE-4-options-hf-MiniLM-IR-cs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
GBaker/MedQA-USMLE-4-options-hf-MiniLM-IR-cs
[ "region:us" ]
2023-02-11T18:45:57+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sent1", "dtype": "string"}, {"name": "sent2", "dtype": "string"}, {"name": "ending0", "dtype": "string"}, {"name": "ending1", "dtype": "string"}, {"name": "ending2", "dtype": "string"}, {"name": "ending3", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 1933180, "num_examples": 1273}, {"name": "validation", "num_bytes": 1905261, "num_examples": 1272}, {"name": "train", "num_bytes": 15360790, "num_examples": 10178}], "download_size": 11125239, "dataset_size": 19199231}}
2023-02-11T23:26:10+00:00
216cacef201c58138a9b8b223ae62f424b1c20e1
# AutoTrain Dataset for project: exacts ## Dataset Description This dataset has been automatically processed by AutoTrain for project exacts. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "How do plants adapt to different environments?", "target": "Plants adapt to different environments through a variety of mechanisms. Some examples include developing deep roots to access water in dry regions, growing smaller leaves to reduce water loss in hot regions, or producing chemicals to deter herbivores in regions with high herbivore populations." }, { "text": "How do volcanoes erupt?", "target": "Volcanoes erupt when pressure from molten rock, ash, and gas inside the volcano builds up and eventually forces its way to the surface. This can happen through a volcano's central vent or through fissures on its slopes." } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 156 | | valid | 39 |
Jonnylaw/questioning-dataset
[ "task_categories:summarization", "region:us" ]
2023-02-11T19:39:14+00:00
{"task_categories": ["summarization"]}
2023-02-11T19:39:35+00:00
eb952841bd8fec669fa91a71612655c7170681f1
### Contributions Thanks to [@raldblox](https://github.com/raldblox) for adding this dataset.
raldblox/cotlin
[ "license:openrail", "region:us" ]
2023-02-11T20:18:54+00:00
{"license": "openrail"}
2023-02-11T21:17:02+00:00
a6d1450239b5fe6d4136082f80132777e8515674
azmisahin/dataset
[ "license:mit", "region:us" ]
2023-02-11T20:54:41+00:00
{"license": "mit"}
2023-02-11T20:54:42+00:00
9039ffce5218e96cf2d9aa45affa53be78fd4d62
Bjorndavidhansen/W
[ "license:openrail", "region:us" ]
2023-02-11T21:29:59+00:00
{"license": "openrail"}
2023-02-11T21:29:59+00:00
89d026fff4cc5c020228821c51ae6012e77b2485
# HC3-textgen-qa - the `Hello-SimpleAI/HC3` dataset reformatted for text generation - special tokens mark questions and answers; see the dataset preview
pszemraj/HC3-textgen-qa
[ "task_categories:text-generation", "source_datasets:Hello-SimpleAI/HC3", "language:en", "license:apache-2.0", "chatgpt", "conversation", "region:us" ]
2023-02-11T22:42:21+00:00
{"language": ["en"], "license": "apache-2.0", "source_datasets": "Hello-SimpleAI/HC3", "task_categories": ["text-generation"], "pretty_name": "HC3 for QA textgen", "tags": ["chatgpt", "conversation"]}
2023-02-11T22:56:14+00:00
b1d3a9287f5ea06ee21498551c08159b56a66a4f
# Dataset Card for "quran-tafsir" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tarteel-ai/quran-tafsir
[ "region:us" ]
2023-02-11T23:18:20+00:00
{"dataset_info": {"features": [{"name": "en-ahmedali", "dtype": "string"}, {"name": "en-ahmedraza", "dtype": "string"}, {"name": "en-arberry", "dtype": "string"}, {"name": "en-asad", "dtype": "string"}, {"name": "en-daryabadi", "dtype": "string"}, {"name": "en-hilali", "dtype": "string"}, {"name": "en-itani", "dtype": "string"}, {"name": "en-maududi", "dtype": "string"}, {"name": "en-mubarakpuri", "dtype": "string"}, {"name": "en-pickthall", "dtype": "string"}, {"name": "en-qarai", "dtype": "string"}, {"name": "en-qaribullah", "dtype": "string"}, {"name": "en-sahih", "dtype": "string"}, {"name": "en-sarwar", "dtype": "string"}, {"name": "en-shakir", "dtype": "string"}, {"name": "en-transliterati", "dtype": "string"}, {"name": "en-wahiduddi", "dtype": "string"}, {"name": "en-yusufali", "dtype": "string"}, {"name": "surah", "dtype": "int64"}, {"name": "ayah", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 16266291, "num_examples": 6236}], "download_size": 9038013, "dataset_size": 16266291}}
2023-02-11T23:18:23+00:00
69b32ad2941d3c12fdfc902f9fb356401ae08711
# Dataset Card for "curiosamente" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
juancopi81/curiosamente
[ "task_categories:automatic-speech-recognition", "whisper", "whispering", "large", "region:us" ]
2023-02-12T03:04:13+00:00
{"task_categories": ["automatic-speech-recognition"], "dataset_info": {"features": [{"name": "CHANNEL_NAME", "dtype": "string"}, {"name": "URL", "dtype": "string"}, {"name": "TITLE", "dtype": "string"}, {"name": "DESCRIPTION", "dtype": "string"}, {"name": "TRANSCRIPTION", "dtype": "string"}, {"name": "SEGMENTS", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2864256, "num_examples": 122}], "download_size": 1516584, "dataset_size": 2864256}, "tags": ["whisper", "whispering", "large"]}
2023-02-16T22:05:33+00:00
f1b3f0bb2af209ba38d14a877b029043c573ebc0
# Chilean Spanish Corpus ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Chilean Spanish ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@jorgeortizfuentes](https://github.com/jorgeortizfuentes) for adding this dataset.
jorgeortizfuentes/chilean-spanish-corpus
[ "task_categories:text-generation", "task_categories:fill-mask", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_categories:10M<n<100M", "source_datasets:original", "language:es", "license:cc-by-sa-4.0", "region:us" ]
2023-02-12T03:05:27+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["es"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10M<n<100M"], "source_datasets": ["original"], "task_categories": ["text-generation", "fill-mask"], "pretty_name": "Chilean Spanish Corpus", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 31427795307.483433, "num_examples": 37126025}], "download_size": 18718981152, "dataset_size": 31427795307.483433}}
2023-02-13T04:13:50+00:00
0d8aaaf6a4affbd04595facfed61e3c6188aa425
# Dataset Card for "poetaexmachina-recitations-lsb" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/poetaexmachina-recitations-lsb
[ "region:us" ]
2023-02-12T04:08:01+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 817933.0, "num_examples": 4}], "download_size": 733536, "dataset_size": 817933.0}}
2023-02-12T04:26:01+00:00
b387a33d847b7355b9e00d8106b88a90a37d8665
# Dataset Card for "poetaexmachina-recitations-milli-d6vergil" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/poetaexmachina-recitations-milli-d6vergil
[ "region:us" ]
2023-02-12T04:27:03+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2850254.0, "num_examples": 17}], "download_size": 2612867, "dataset_size": 2850254.0}}
2023-02-12T04:27:16+00:00
6a334b9611d19be0e2559a140a4338036cb3c4a0
# Dataset Card for "poetaexmachina-recitations-milli-onegrams" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/poetaexmachina-recitations-milli-onegrams
[ "region:us" ]
2023-02-12T04:27:17+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7916446.0, "num_examples": 177}], "download_size": 6517487, "dataset_size": 7916446.0}}
2023-02-12T04:27:32+00:00
432ac0e2243a1eba785227110bf580bd102d1688
# Dataset Card for "poetaexmachina-recitations-d6vergil" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/poetaexmachina-recitations-d6vergil
[ "region:us" ]
2023-02-12T04:27:34+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2443347256.8, "num_examples": 15287}], "download_size": 2195373036, "dataset_size": 2443347256.8}}
2023-02-12T04:30:03+00:00
7f78309bc5b3ec78290bcaa842b6b2d8310b3a48
# Dataset Card for "poetaexmachina-recitations-onegrams" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/poetaexmachina-recitations-onegrams
[ "region:us" ]
2023-02-12T04:30:15+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6909913371.324, "num_examples": 176826}], "download_size": 6026737051, "dataset_size": 6909913371.324}}
2023-02-12T04:38:19+00:00
01f17a02927660c027d27b452e5e60e678f1869a
akadhim-ai/dilbert-comic-dataset
[ "license:openrail", "region:us" ]
2023-02-12T05:54:10+00:00
{"license": "openrail", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "train"}}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1846493.0, "num_examples": 50}], "download_size": 0, "dataset_size": 1846493.0}}
2023-02-12T05:57:49+00:00
332fb09320fa5835b48d06200b74108e68fb50d0
ashwinpokee/parrot_paraphraser_on_T5
[ "license:artistic-2.0", "doi:10.57967/hf/0372", "region:us" ]
2023-02-12T06:04:47+00:00
{"license": "artistic-2.0"}
2023-02-12T06:23:53+00:00
a831833e6323636e673c0e87a8950fda2e59824b
This is a conversational dataset collected from the WarOnline Israeli military forum. Language: Russian (with Hebrew additions). Target audience: military. The dataset has been used to train a military chatbot.
kertser/WarOnline
[ "size_categories:100K<n<1M", "language:ru", "license:apache-2.0", "NLP", "WarOnline", "ChatBot", "Conversational", "region:us" ]
2023-02-12T07:09:49+00:00
{"language": ["ru"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "pretty_name": "War Online", "tags": ["NLP", "WarOnline", "ChatBot", "Conversational"]}
2023-02-12T08:18:01+00:00
ac96eaa2363cc7454a810c7e3882d3f8f58213ff
# Dataset Card for "render_text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
achang/render_text
[ "region:us" ]
2023-02-12T08:15:23+00:00
{"dataset_info": {"features": [{"name": "review_body", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "section", "dtype": "image"}, {"name": "section_text", "dtype": "string"}, {"name": "pos_section_txt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6082763237.75, "num_examples": 63057}, {"name": "validation", "num_bytes": 150630977.75, "num_examples": 1561}, {"name": "test", "num_bytes": 152523383.75, "num_examples": 1581}], "download_size": 6373937421, "dataset_size": 6385917599.25}}
2023-02-19T07:22:49+00:00
0d95002952e1fe53a2f2e9e761bed1fa737229f8
Creation (Copied & adapted from https://github.com/stanford-crfm/helm/blob/0eaaa62a2263ddb94e9850ee629423b010f57e4a/src/helm/benchmark/scenarios/babi_qa_scenario.py):

```python
!wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz
!tar -xf tasks_1-20_v1-2.tar.gz

import json
from typing import List

tasks = list(range(1, 20))  # tasks 1-19
splits = ["train", "valid", "test"]

def process_path(path: str) -> str:
    """Turn a path string (task 19) from the original format 's,w'
    to a verbal model-friendly format 'south west'"""
    steps: List[str] = path.split(",")
    directions = {"s": "south", "n": "north", "e": "east", "w": "west"}
    path = " ".join([directions[step] for step in steps])
    return path

for split in splits:
    with open(f"babi_{split}.jsonl", "w") as f_base:
        for task in tasks:
            split_path: str = f"./tasks_1-20_v1-2/en-valid/qa{task}_{split}.txt"
            with open(split_path, "r") as f:
                facts = list(f)
            story: List[str] = []
            for fact in facts:
                fid = int(fact.split(" ")[0])
                if fid == 1:  # a new story starts
                    story = []
                fact = " ".join(fact.split(" ")[1:])
                is_question = "?" in fact
                if is_question:
                    question, answer = fact.split("\t")[:2]
                    question, answer = question.strip(), answer.strip()
                    # All tasks except task 19 have a verbal single-word answer (e.g. kitchen, apple, yes).
                    # Task 19 (path finding) has a non-verbal answer format (e.g. 's,w'),
                    # which process_path turns into words.
                    if task == 19:
                        answer = process_path(answer)
                    f_base.write(json.dumps({
                        "passage": "".join(story),
                        "question": question,
                        "answer": answer,
                        "task": task,
                    }) + "\n")
                    # Sanity check: no question line should ever end up inside a story.
                    if any("?" in s for s in story):
                        print("STORY", "".join(story))
                else:
                    story.append(fact)
```
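To sanity-check the result, the generated files can be read back with the `datasets` JSON loader (a small sketch; the file names follow the script above):

```python
from datasets import load_dataset

# One JSON object per line: passage, question, answer, task.
babi = load_dataset(
    "json",
    data_files={
        "train": "babi_train.jsonl",
        "valid": "babi_valid.jsonl",
        "test": "babi_test.jsonl",
    },
)
print(babi["train"][0])
```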
Muennighoff/babi
[ "region:us" ]
2023-02-12T09:19:00+00:00
{}
2023-02-12T13:34:24+00:00
aaff98ff0e2e55dd18256a02ca3f8eca990881cc
# RAFT submissions for my-raft-submission

## Submitting to the leaderboard

To make a submission to the [leaderboard](https://huggingface.co/spaces/ought/raft-leaderboard), there are three main steps:

1. Generate predictions on the unlabeled test set of each task
2. Validate the predictions are compatible with the evaluation framework
3. Push the predictions to the Hub!

See the instructions below for more details.

### Rules

1. To prevent overfitting to the public leaderboard, we only evaluate **one submission per week**. You can push predictions to the Hub as many times as you wish, but we will only evaluate the most recent commit in a given week.
2. Transfer or meta-learning using other datasets, including further pre-training on other corpora, is allowed.
3. Use of unlabeled test data is allowed, as it is always available in the applied setting. For example, further pre-training using the unlabeled data for a task would be permitted.
4. Systems may be augmented with information retrieved from the internet, e.g. via automated web searches.

### Submission file format

For each task in RAFT, you should create a CSV file called `predictions.csv` with your model's predictions on the unlabeled test set. Each file should have exactly 2 columns:

* ID (int)
* Label (string)

See the dummy predictions in the `data` folder for examples with the expected format. Here is a simple example that creates a majority-class baseline:

```python
from pathlib import Path
import pandas as pd
from collections import Counter
from datasets import load_dataset, get_dataset_config_names

tasks = get_dataset_config_names("ought/raft")

for task in tasks:
    # Load dataset
    raft_subset = load_dataset("ought/raft", task)
    # Compute majority class over training set
    counter = Counter(raft_subset["train"]["Label"])
    majority_class = counter.most_common(1)[0][0]
    # Load predictions file
    preds = pd.read_csv(f"data/{task}/predictions.csv")
    # Convert label IDs to label names
    preds["Label"] = raft_subset["train"].features["Label"].int2str(majority_class)
    # Save predictions
    preds.to_csv(f"data/{task}/predictions.csv", index=False)
```

As you can see in the example, each `predictions.csv` file should be stored in the task's subfolder in `data` and at the end you should have something like the following:

```
data
├── ade_corpus_v2
│   ├── predictions.csv
│   └── task.json
├── banking_77
│   ├── predictions.csv
│   └── task.json
├── neurips_impact_statement_risks
│   ├── predictions.csv
│   └── task.json
├── one_stop_english
│   ├── predictions.csv
│   └── task.json
├── overruling
│   ├── predictions.csv
│   └── task.json
├── semiconductor_org_types
│   ├── predictions.csv
│   └── task.json
├── systematic_review_inclusion
│   ├── predictions.csv
│   └── task.json
├── tai_safety_research
│   ├── predictions.csv
│   └── task.json
├── terms_of_service
│   ├── predictions.csv
│   └── task.json
├── tweet_eval_hate
│   ├── predictions.csv
│   └── task.json
└── twitter_complaints
    ├── predictions.csv
    └── task.json
```

### Validate your submission

To ensure that your submission files are correctly formatted, run the following command from the root of the repository:

```
python cli.py validate
```

If everything is correct, you should see the following message:

```
All submission files validated! ✨ 🚀 ✨
Now you can make a submission 🤗
```

### Push your submission to the Hugging Face Hub!

The final step is to commit your files and push them to the Hub:

```
python cli.py submit
```

If there are no errors, you should see the following message:

```
Submission successful! 🎉 🥳 🎉
Your submission will be evaluated on Sunday 05 September 2021 ⏳
```

where the evaluation is run every Sunday and your results will be visible on the leaderboard.
Linuxdex/my-raft-submission
[ "benchmark:raft", "region:us" ]
2023-02-12T09:19:33+00:00
{"benchmark": "raft", "type": "prediction", "submission_name": "AG-tt"}
2023-03-20T09:35:25+00:00
70b0346b9db77199203df5f077deeefec29af1da
# Dataset Card for "PneumoniaHippo" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
HippoLite/PneumoniaHippo
[ "region:us" ]
2023-02-12T09:59:44+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3500970321.536, "num_examples": 11712}], "download_size": 2465721553, "dataset_size": 3500970321.536}}
2023-02-12T10:00:58+00:00
63eeeb7847fe648cd71277a3f8872f2be82cf199
# Dataset Card for "bookcorpus_compact_1024_shard6_of_10_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard6_of_10_meta
[ "region:us" ]
2023-02-12T10:10:53+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}, {"name": "cid_arrangement", "sequence": "int32"}, {"name": "schema_lengths", "sequence": "int64"}, {"name": "topic_entity_mask", "sequence": "int64"}, {"name": "text_lengths", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 7837212848, "num_examples": 61605}], "download_size": 1730877027, "dataset_size": 7837212848}}
2023-02-12T10:40:35+00:00
bedcc4438826674ab0175c8d47caaca534175af2
# Peanuts Comic Strip Dataset (Snoopy & Co.)

![Peanuts 1999/01/30](preview.png)

This is a dataset of Peanuts comic strips from `1950/10/02` to `2000/02/13`. There are `77,457` panels extracted from `17,816` comic strips. The dataset size is approximately `4.4G`. Each row in the dataset contains the following fields:

- `image`: `PIL.Image` containing the extracted panel.
- `panel_name`: unique identifier for the row.
- `characters`: `tuple[str, ...]` of characters included in the comic strip the panel is part of.
- `themes`: `tuple[str, ...]` of themes in the comic strip the panel is part of.
- `color`: `str` indicating whether the panel is grayscale or in color.
- `caption`: [BLIP-2_OPT_6.7B](https://huggingface.co/docs/transformers/main/model_doc/blip-2) generated caption from the panel.
- `year`: `int` storing the year the specific panel was released.

> **OPT-6.7B has a non-commercial use license and so this dataset cannot be used for commercial projects. If you need a dataset for commercial use please see [this similar dataset](https://huggingface.co/datasets/afmck/peanuts-flan-t5-xl) that uses Flan-T5-XL, which allows for commercial use.**

Character and theme information was extracted from [Peanuts Wiki (Fandom)](https://peanuts.fandom.com/wiki/Peanuts_Wiki) using [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/). Images were extracted from [Peanuts Search](https://peanuts-search.com/). Only strips with the following characters were extracted:

```
- "Charlie Brown"
- "Sally Brown"
- "Joe Cool"  # Snoopy alter-ego
- "Franklin"
- "Violet Gray"
- "Eudora"
- "Frieda"
- "Marcie"
- "Peppermint Patty"
- "Patty"
- "Pig-Pen"
- "Linus van Pelt"
- "Lucy van Pelt"
- "Rerun van Pelt"
- "Schroeder"
- "Snoopy"
- "Shermy"
- "Spike"
- "Woodstock"
- "the World War I Flying Ace"  # Snoopy alter-ego
```

### Extraction Details

Panel detection and extraction was done using the following codeblock:

```python
import cv2

def check_contour(cnt):
    # Reject panels with area < 600 or aspect ratio outside [0.5, 2].
    area = cv2.contourArea(cnt)
    if area < 600:
        return False
    _, _, w, h = cv2.boundingRect(cnt)
    if w / h < 1 / 2:
        return False
    if w / h > 2 / 1:
        return False
    return True

def get_panels_from_image(path):
    panels = []
    original_img = cv2.imread(path)
    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
    invert = 255 - opening
    cnts, _ = cv2.findContours(invert, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    idx = 0
    for cnt in cnts:
        if not check_contour(cnt):
            continue
        idx += 1
        x, y, w, h = cv2.boundingRect(cnt)
        roi = original_img[y:y+h, x:x+w]
        panels.append(roi)
    return panels
```

`check_contour` will reject panels with `area < 600` or with aspect ratios larger than `2` or smaller than `0.5`.

Grayscale detection was done using the following codeblock:

```python
import cv2
import numpy as np

def is_grayscale(panel):
    # A panel counts as grayscale when the mean difference between the
    # a* and b* chroma channels falls below the threshold.
    LAB_THRESHOLD = 10.
    img = cv2.cvtColor(panel, cv2.COLOR_RGB2LAB)
    _, ea, eb = cv2.split(img)
    de = abs(ea - eb)
    mean_e = np.mean(de)
    return mean_e < LAB_THRESHOLD
```

Captioning was done using the standard BLIP-2 pipeline shown in the [Huggingface docs](https://huggingface.co/docs/transformers/main/model_doc/blip-2) using beam search over 10 beams and a repetition penalty of `2.0`. Raw captions are extracted and no postprocessing is applied. You may wish to normalise captions (such as replacing "cartoon" with "peanuts cartoon") or incorporate extra metadata into prompts.
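For reference, captions along these lines can be reproduced with the standard Hugging Face BLIP-2 pipeline; the sketch below uses the generation settings described above (10 beams, repetition penalty 2.0), while the checkpoint name and device handling are assumptions:

```python
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

# BLIP-2 with the OPT-6.7B language model (assumed checkpoint name).
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-6.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-6.7b", torch_dtype=torch.float16
).to("cuda")

def caption_panel(panel: Image.Image) -> str:
    # Beam search over 10 beams with a repetition penalty of 2.0;
    # the raw caption is returned with no postprocessing.
    inputs = processor(images=panel, return_tensors="pt").to("cuda", torch.float16)
    out = model.generate(**inputs, num_beams=10, repetition_penalty=2.0)
    return processor.batch_decode(out, skip_special_tokens=True)[0].strip()
```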
afmck/peanuts-opt-6.7b
[ "task_categories:text-to-image", "size_categories:10K<n<100K", "language:en", "license:other", "region:us" ]
2023-02-12T10:20:14+00:00
{"language": ["en"], "license": "other", "size_categories": ["10K<n<100K"], "task_categories": ["text-to-image"], "pretty_name": "Peanuts Dataset (Snoopy and Co.)", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "panel_name", "dtype": "string"}, {"name": "characters", "sequence": "string"}, {"name": "themes", "sequence": "string"}, {"name": "color", "dtype": "string"}, {"name": "year", "dtype": "int64"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2948640650.848, "num_examples": 77456}], "download_size": 4601323640, "dataset_size": 2948640650.848}}
2023-04-03T15:09:56+00:00
04b8f88db2683b82aebbdce37d00234e957e147d
```bib
@article{tang2022naughtyformer,
  title={The Naughtyformer: A Transformer Understands Offensive Humor},
  author={Tang, Leonard and Cai, Alexander and Li, Steve and Wang, Jason},
  journal={arXiv preprint arXiv:2211.14369},
  year={2022}
}
```
metaeval/offensive-humor
[ "region:us" ]
2023-02-12T10:40:39+00:00
{}
2023-02-12T10:42:32+00:00
b8139a0a66bc2c05983b378a043c700a2b919835
# Dataset Card for "pile-small-tokenized-2b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
NeelNanda/pile-small-tokenized-2b
[ "region:us" ]
2023-02-12T12:20:37+00:00
{"dataset_info": {"features": [{"name": "tokens", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 44263497500, "num_examples": 10795975}], "download_size": 19763664789, "dataset_size": 44263497500}}
2023-02-12T16:25:43+00:00
e3b6e2f4ccee9a7fd30f99d8dc46fb4da68ce376
gg4869/Lora_of_SD
[ "license:unknown", "region:us" ]
2023-02-12T12:30:24+00:00
{"license": "unknown"}
2023-02-12T12:30:24+00:00
5755097f4cb23f6bc76559e927b2310de365ceb8
# Dataset Card for "fnli" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Deojoandco/fnli
[ "region:us" ]
2023-02-12T13:04:48+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "entailment", "1": "neutral", "2": "contradiction"}}}}], "splits": [{"name": "train", "num_bytes": 61159046, "num_examples": 550152}, {"name": "validation", "num_bytes": 1120856, "num_examples": 10000}, {"name": "test", "num_bytes": 1117922, "num_examples": 10000}], "download_size": 20299372, "dataset_size": 63397824}}
2023-02-24T08:42:01+00:00
f0130d8e94f6463ab5cd2316c4da8130dec18cab
tguyt/myataset_test
[ "task_categories:question-answering", "language:en", "region:us" ]
2023-02-12T14:55:35+00:00
{"language": ["en"], "task_categories": ["question-answering"]}
2023-02-12T14:56:49+00:00
f11d658775bd3e0bbbd6a1b0f240003f980ebc94
# Dataset Card for "java_0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thmk/java_0
[ "region:us" ]
2023-02-12T15:01:14+00:00
{"dataset_info": {"features": [{"name": "code", "dtype": "string"}, {"name": "repo_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "size", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 597095942, "num_examples": 100000}], "download_size": 0, "dataset_size": 597095942}}
2023-02-12T15:23:46+00:00
f7ae3e9a01c59ffaaa418f7a82af8c732bbf2630
ybendou/few-shot-inaturalist-hf
[ "license:apache-2.0", "region:us" ]
2023-02-12T15:10:04+00:00
{"license": "apache-2.0"}
2023-04-19T13:33:14+00:00
581a521327149965a3c21bfe06d6dacb6217079e
# Dataset Card for "java_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thmk/java_10
[ "region:us" ]
2023-02-12T15:55:44+00:00
{"dataset_info": {"features": [{"name": "code", "dtype": "string"}, {"name": "repo_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "size", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 605825109, "num_examples": 100000}], "download_size": 195428485, "dataset_size": 605825109}}
2023-02-12T15:56:01+00:00
47a6bb18216a6a0cd17223d23a0ce6fa3a3a6724
# Dataset Card for "class_dataset2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
LFBMS/class_dataset2
[ "region:us" ]
2023-02-12T16:12:48+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "bilanz_datev", "1": "bilanz_lexware", "2": "guv", "3": "other"}}}}], "splits": [{"name": "train", "num_bytes": 13700431777.0, "num_examples": 4000}, {"name": "validation", "num_bytes": 548626720.0, "num_examples": 500}, {"name": "test", "num_bytes": 559045772.0, "num_examples": 500}], "download_size": 5407648855, "dataset_size": 14808104269.0}}
2023-02-12T16:15:51+00:00
95fa88d0a7fc98cbf5454d920dc857b1ff6b2e43
# Dataset Card for "class_dataset_donut2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
LFBMS/class_dataset_donut2
[ "region:us" ]
2023-02-12T16:22:15+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "bilanz_datev", "1": "bilanz_lexware", "2": "guv", "3": "other"}}}}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 559064953.0, "num_examples": 500}, {"name": "train", "num_bytes": 4343890380.0, "num_examples": 4000}, {"name": "validation", "num_bytes": 548645901.0, "num_examples": 500}], "download_size": 5424719748, "dataset_size": 5451601234.0}}
2023-02-12T16:25:06+00:00
cb40061d1c6c6493facce3bb2b4437dccd457efc
# Dataset Card for "VQAv2_sample_validation_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/VQAv2_sample_validation_embeddings
[ "region:us" ]
2023-02-12T16:59:52+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 158671010.0, "num_examples": 1000}], "download_size": 156748467, "dataset_size": 158671010.0}}
2023-02-12T17:00:15+00:00
d4b38bd416a45b3df41f8dafcf3e2194e85a7c06
Achitha/tamil_eng_data
[ "task_categories:translation", "size_categories:1K<n<10K", "language:ta", "language:en", "region:us" ]
2023-02-12T17:01:41+00:00
{"language": ["ta", "en"], "size_categories": ["1K<n<10K"], "task_categories": ["translation"]}
2023-02-12T18:52:26+00:00
be4dc50651721106d36a4a225597d22374acce25
# Dataset Card for "VQAv2_sample_test_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/VQAv2_sample_test_embeddings
[ "region:us" ]
2023-02-12T17:02:34+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 160166334.0, "num_examples": 1000}], "download_size": 159038686, "dataset_size": 160166334.0}}
2023-02-12T17:02:45+00:00
e5fbed726ca7289dd04cf9bc106024cc0d3bd648
# Dataset Card for "VQAv2_sample_testdev_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/VQAv2_sample_testdev_embeddings
[ "region:us" ]
2023-02-12T17:04:14+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 161538407.0, "num_examples": 1000}], "download_size": 160035377, "dataset_size": 161538407.0}}
2023-02-12T17:04:41+00:00
f5bcb32ca4fdfbb9972b39f90ae0212fd78d92db
# Dataset Card for "Sunshine-the-Chicken" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hatman/Sunshine-the-banta-chicken
[ "region:us" ]
2023-02-12T17:33:15+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 15439966.0, "num_examples": 13}], "download_size": 15318668, "dataset_size": 15439966.0}}
2023-02-28T03:40:30+00:00
0df7d0f25e98595129777372867c29cef9b4ef27
# Neuro CNN Project - Fernando Feltrin # Brain Meningioma images (39 classes) for image classification ## Dataset Description - **More info: [email protected]** ### Dataset Summary A collection of T1, contrast-enhanced, and T2-weighted MRI images of meningiomas, sorted according to location in the brain. The images contain no markings or patient identification; they were interpreted by radiologists and are provided for study purposes. Images are grouped by location: clivus / petroclival, sphenoid / cavernous sinus, anterior cranial fossa, medial cranial fossa, posterior cranial fossa, frontal / frontoparietal, frontotemporal, infratentorial / cerebellar, interhemispheric / suprasellar, intracisternal, intraventricular / parafalcine, parietal / parietooccipital, supratentorial, temporal / temporoparietal.
fernando2rad/neuro_cnn_meningioma_39c
[ "task_categories:image-classification", "size_categories:1K<n<10K", "license:odc-by", "brain mri images", "brain images", "brain tumor", "brain meningioma", "meningioma mri images", "region:us" ]
2023-02-12T17:52:50+00:00
{"license": "odc-by", "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "pretty_name": "Neuro CNN Brain Meningioma 39 classes", "tags": ["brain mri images", "brain images", "brain tumor", "brain meningioma", "meningioma mri images"]}
2023-02-12T18:20:00+00:00
c1a840660e7cd568fa116f967389aa3aee26e364
akadhim-ai/dilbert_and_boss
[ "license:openrail", "region:us" ]
2023-02-12T18:38:47+00:00
{"license": "openrail", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "train"}}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 516609.0, "num_examples": 7}], "download_size": 518209, "dataset_size": 516609.0}}
2023-02-12T18:41:56+00:00
2f84abcda7ff942628a476ab24e6296c35d25199
# Dataset card for personSeg ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset description](#dataset-description) - [Dataset categories](#dataset-categories) ## Dataset description - **Homepage:** https://segments.ai/shahardekel/personSeg This dataset was created using [Segments.ai](https://segments.ai). It can be found [here](https://segments.ai/shahardekel/personSeg). ## Dataset categories | Id | Name | Description | | --- | ---- | ----------- | | 1 | person | - |
shahardekel/personSeg
[ "task_categories:image-segmentation", "region:us" ]
2023-02-12T18:40:19+00:00
{"task_categories": ["image-segmentation"]}
2023-02-14T14:38:54+00:00
fb87cbf30db41c4214e647a6bfac2f1dcb862c3e
RecipePairs dataset, originally from the 2022 EMNLP paper: ["SHARE: a System for Hierarchical Assistive Recipe Editing"](https://aclanthology.org/2022.emnlp-main.761/) by Shuyang Li, Yufei Li, Jianmo Ni, and Julian McAuley. This version (1.5.0) has been updated with 6.9M pairs of `base -> target` recipes, alongside their name overlap, IOU (longest common subsequence / union), and target dietary categories. These cover the 459K recipes from the original GeniusKitcen/Food.com dataset. If you would like to use this data or found it useful in your work/research, please cite the following papers: ``` @inproceedings{li-etal-2022-share, title = "{SHARE}: a System for Hierarchical Assistive Recipe Editing", author = "Li, Shuyang and Li, Yufei and Ni, Jianmo and McAuley, Julian", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, United Arab Emirates", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.emnlp-main.761", pages = "11077--11090", abstract = "The large population of home cooks with dietary restrictions is under-served by existing cooking resources and recipe generation models. To help them, we propose the task of controllable recipe editing: adapt a base recipe to satisfy a user-specified dietary constraint. This task is challenging, and cannot be adequately solved with human-written ingredient substitution rules or existing end-to-end recipe generation models. We tackle this problem with SHARE: a System for Hierarchical Assistive Recipe Editing, which performs simultaneous ingredient substitution before generating natural-language steps using the edited ingredients. By decoupling ingredient and step editing, our step generator can explicitly integrate the available ingredients. Experiments on the novel RecipePairs dataset{---}83K pairs of similar recipes where each recipe satisfies one of seven dietary constraints{---}demonstrate that SHARE produces convincing, coherent recipes that are appropriate for a target dietary constraint. We further show through human evaluations and real-world cooking trials that recipes edited by SHARE can be easily followed by home cooks to create appealing dishes.", } @inproceedings{majumder-etal-2019-generating, title = "Generating Personalized Recipes from Historical User Preferences", author = "Majumder, Bodhisattwa Prasad and Li, Shuyang and Ni, Jianmo and McAuley, Julian", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D19-1613", doi = "10.18653/v1/D19-1613", pages = "5976--5982", abstract = "Existing approaches to recipe generation are unable to create recipes for users with culinary preferences but incomplete knowledge of ingredients in specific dishes. We propose a new task of personalized recipe generation to help these users: expanding a name and incomplete ingredient details into complete natural-text instructions aligned with the user{'}s historical preferences. We attend on technique- and recipe-level representations of a user{'}s previously consumed recipes, fusing these {`}user-aware{'} representations in an attention fusion layer to control recipe text generation. 
Experiments on a new dataset of 180K recipes and 700K interactions show our model{'}s ability to generate plausible and personalized recipes compared to non-personalized baselines.", } ```
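A minimal loading sketch follows. The `1.5.0` config and `pairs` split names come from this dataset's metadata; the `base_name`/`target_name` field names and the token-level reading of "longest common subsequence / union" are assumptions, so inspect a record before relying on them:

```python
from datasets import load_dataset

# Stream the 6.9M recipe pairs (config and split names taken from the dataset metadata).
pairs = load_dataset("lishuyang/recipepairs", "1.5.0", split="pairs", streaming=True)

def lcs_length(a, b):
    # Classic dynamic-programming longest common subsequence over token lists.
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            dp[i + 1][j + 1] = dp[i][j] + 1 if x == y else max(dp[i][j + 1], dp[i + 1][j])
    return dp[-1][-1]

def name_iou(base_name, target_name):
    # One plausible reading of the card's IOU: LCS length / union of the two token sets.
    base, target = base_name.split(), target_name.split()
    lcs = lcs_length(base, target)
    union = len(base) + len(target) - lcs
    return lcs / union if union else 0.0

first = next(iter(pairs))
print(first)  # check the actual field names here before using name_iou
```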
lishuyang/recipepairs
[ "task_categories:text-generation", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "license:gpl-3.0", "region:us" ]
2023-02-12T19:29:57+00:00
{"annotations_creators": "no-annotation", "language_creators": "found", "language": "en", "license": "gpl-3.0", "multilinguality": "monolingual", "size_categories": ["1M<n<10M"], "source_datasets": "original", "task_categories": ["text-generation"], "pretty_name": "RecipePairs", "dataset_info": [{"config_name": "1.5.0", "splits": [{"name": "pairs", "num_examples": 6908697}]}]}
2023-03-21T15:12:41+00:00
dc43f14aedeecf18bef82960f5227eb3c3d75bda
Isamu136/big-animal-dataset-with-embedding
[ "license:mit", "region:us" ]
2023-02-12T21:19:13+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}, {"name": "l14_embeddings", "sequence": "float32"}, {"name": "moco_vitb_imagenet_embeddings", "sequence": "float32"}, {"name": "moco_vitb_imagenet_embeddings_without_last_layer", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 2125655956.375, "num_examples": 62149}], "download_size": 2238679414, "dataset_size": 2125655956.375}}
2023-02-12T22:42:07+00:00
0e33fc0e87fa493ddf7ddd45ce1df0c68aa6ec4e
# Dataset Card for "dilbert-short-comic" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
akadhim-ai/dilbert-short-comic
[ "region:us" ]
2023-02-12T22:01:03+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 377934.0, "num_examples": 12}], "download_size": 379115, "dataset_size": 377934.0}}
2023-02-12T22:03:15+00:00
9716a0cf5cd45494ff4afbc0915cd9a92a2b2320
# Dataset Card for "bookcorpus_compact_1024_shard8_of_10_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard8_of_10_meta
[ "region:us" ]
2023-02-12T23:21:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}, {"name": "cid_arrangement", "sequence": "int32"}, {"name": "schema_lengths", "sequence": "int64"}, {"name": "topic_entity_mask", "sequence": "int64"}, {"name": "text_lengths", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 7774340762, "num_examples": 61605}], "download_size": 1711444340, "dataset_size": 7774340762}}
2023-02-12T23:38:28+00:00
0135fd0cb402234336e4b31233f260579883b10d
Fiizy/Diizy
[ "license:afl-3.0", "region:us" ]
2023-02-13T00:00:03+00:00
{"license": "afl-3.0"}
2023-02-13T00:00:03+00:00
0677791870a30024336ec2685b29ca1f714764a2
# Dataset Card for "ID-SQuAD" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Rifky/ID-SQuAD
[ "region:us" ]
2023-02-13T01:37:38+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "test", "num_bytes": 12218827, "num_examples": 11858}, {"name": "train", "num_bytes": 121632833, "num_examples": 130318}, {"name": "validation", "num_bytes": 12218827, "num_examples": 11858}], "download_size": 19391596, "dataset_size": 146070487}}
2023-04-08T03:55:02+00:00
74d7b965343554ded234dd792af16eb57351d480
### Top-10 sampled news category dataset Randomly sampled news data. Original dataset: https://www.kaggle.com/datasets/rmisra/news-category-dataset ### Value Counts per Category ``` ENTERTAINMENT 10000 POLITICS 10000 WELLNESS 10000 TRAVEL 9900 STYLE & BEAUTY 9814 PARENTING 8791 HEALTHY LIVING 6694 QUEER VOICES 6347 FOOD & DRINK 6340 BUSINESS 5992 ```
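A minimal sketch of how such a balanced subset could be produced from the Kaggle source; the filename `News_Category_Dataset_v3.json`, the JSON-lines layout, and the 10,000-per-category cap are assumptions inferred from the value counts above:

```python
import pandas as pd

# The Kaggle dump is one article per line with a "category" field.
df = pd.read_json("News_Category_Dataset_v3.json", lines=True)

# Keep the ten most frequent categories, sampling at most 10,000 rows from each.
top10 = df["category"].value_counts().nlargest(10).index
balanced = (
    df[df["category"].isin(top10)]
    .groupby("category", group_keys=False)
    .apply(lambda g: g.sample(min(len(g), 10_000), random_state=42))
)
print(balanced["category"].value_counts())
```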
heegyu/news-category-balanced-top10
[ "license:cc-by-4.0", "region:us" ]
2023-02-13T02:45:28+00:00
{"license": "cc-by-4.0"}
2023-02-13T02:56:31+00:00
8a54441234b57ebd14b8589deee93b476b29b6f7
suanbio81/Test
[ "license:openrail", "region:us" ]
2023-02-13T04:06:07+00:00
{"license": "openrail"}
2023-02-13T04:06:08+00:00
9fb691283b5a1ecbf1776ef7299f855334da5304
# Dataset Card for "pc_380" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
taldarim/pc_380
[ "region:us" ]
2023-02-13T04:39:33+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "Results interpretation", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Frameworks usage", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Algorithms design", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Algorithms implementation", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Launching problem", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Performance issue", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Feasibility of application development", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Application availability", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Device usage", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "Input data usage", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}], "splits": [{"name": "train", "num_bytes": 594651, "num_examples": 383}], "download_size": 281261, "dataset_size": 594651}}
2023-02-13T04:39:44+00:00
0af072d582c2f625185b9b4533b650d0a38ad63f
# Dataset Card for "contract" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jhn9803/contract
[ "region:us" ]
2023-02-13T04:56:37+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "clean_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2618996, "num_examples": 7982}, {"name": "val", "num_bytes": 323947, "num_examples": 994}], "download_size": 987111, "dataset_size": 2942943}}
2023-02-13T04:57:02+00:00
3273fd9f501028692293b71bc662782448cd4a06
Plachta/sampled_audio4ft
[ "license:apache-2.0", "region:us" ]
2023-02-13T07:06:09+00:00
{"license": "apache-2.0"}
2023-04-17T07:17:59+00:00
cdf105f1bd1d70dce7e5a5d22470d6d118c8926d
[ { "review_id": "1", "product_id": "1234", "reviewer_id": "John Doe", "stars": 4, "review_body": "This product is great!", "review_title": "Fantastic Product!", "language": "en", "product_category": "Electronics" }, { "review_id": "2", "product_id": "5678", "reviewer_id": "Jane Doe", "stars": 5, "review_body": "This product is amazing!", "review_title": "Wonderful Product!", "language": "en", "product_category": "Apparel" }, // ... { "review_id": "1000", "product_id": "2468", "reviewer_id": "John Smith", "stars": 3, "review_body": "This product is alright.", "review_title": "Average Product!", "language": "en", "product_category": "Home & Kitchen" } ]
elhaddajiotmane/dar
[ "region:us" ]
2023-02-13T07:20:36+00:00
{}
2023-02-13T07:21:53+00:00
70780f6ab57b2cf897727755ab4a9ebf226c3c90
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
arguezva/games
[ "region:us" ]
2023-02-13T07:52:01+00:00
{}
2023-02-13T07:52:38+00:00
73dee007b8476fd70c636c8c0f35d424980c360b
SFKs/ff
[ "license:openrail", "region:us" ]
2023-02-13T08:13:15+00:00
{"license": "openrail"}
2023-02-13T08:13:15+00:00
c083b78afbb44fbc89517765ae0f27fc06acdbbc
KarosY/LAION_256_1000_1
[ "region:us" ]
2023-02-13T08:29:39+00:00
{}
2023-02-14T03:57:58+00:00
362c78b668ef3c8d121492ffdf77c83a0f4ecfc2
# Dataset Card for "simplewiki2023" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lsb/simplewiki2023
[ "region:us" ]
2023-02-13T08:42:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 275839740, "num_examples": 225332}], "download_size": 148218428, "dataset_size": 275839740}}
2023-02-13T08:43:12+00:00
e77e98b1d66afc0ec525fd5d031e54c40f491a4c
# Dataset Card for "jva-missions-report-raw" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jeveuxaider/jva-missions-report-raw
[ "region:us" ]
2023-02-13T09:28:57+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "Valid\u00e9e", "1": "Signal\u00e9e"}}}}, {"name": "idx", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22880285, "num_examples": 16433}], "download_size": 9897212, "dataset_size": 22880285}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-20T09:24:45+00:00
968a50c7856add431897181832ea6604046d0560
whybeyoung/ds1
[ "license:apache-2.0", "doi:10.57967/hf/0362", "region:us" ]
2023-02-13T09:43:13+00:00
{"license": "apache-2.0"}
2023-02-20T06:40:43+00:00
e4d796df61352fc4b50c6f75e9f71602f4e3f7d5
## Example dataset card on playing video inside dataset cards <video loop autoplay controls src="https://huggingface.co/araffin/ppo-LunarLander-v2/resolve/main/replay.mp4"></video> Since dataset cards support HTML, you can just use the HTML video tag: ```html <video loop autoplay controls src="https://huggingface.co/araffin/ppo-LunarLander-v2/resolve/main/replay.mp4"></video> ``` note: change the src to your video. You can upload the demo video as part of your dataset as well & use it like `https://huggingface.co/{DATASET_OWNER}/{DATASET_NAME}/resolve/main/{VIDEO_PATH}.mp4`
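For example, the URL pattern above can be filled in like this (a sketch; the owner, dataset name, and path are placeholders):

```python
# Build the resolve URL for a video committed to your dataset repo.
owner, name, path = "my-user", "my-dataset", "videos/demo"
src = f"https://huggingface.co/{owner}/{name}/resolve/main/{path}.mp4"
print(f'<video loop autoplay controls src="{src}"></video>')
```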
mishig/tets_rl
[ "region:us" ]
2023-02-13T09:49:56+00:00
{}
2023-02-13T13:13:45+00:00
c9edfe05a41811021c11f67c368f96b8a21a063c
silviaarellano/heightmaps
[ "task_categories:image-to-image", "license:mit", "maps", "heightmaps", "region:us" ]
2023-02-13T09:51:10+00:00
{"license": "mit", "task_categories": ["image-to-image"], "tags": ["maps", "heightmaps"]}
2023-02-13T13:36:46+00:00
fb4178fd1628217ce0ff450f675bb516a1b7f51a
Allen166/dataset1
[ "license:apache-2.0", "region:us" ]
2023-02-13T09:51:32+00:00
{"license": "apache-2.0"}
2023-02-13T10:02:43+00:00
fdcc6cd6a50d0bbf3be08af9beaab5436b73b448
# Dataset Card for Output ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://github.com/andstor/lm-output-dataset - **Repository:** https://github.com/andstor/lm-output-dataset - **Paper:** - **Leaderboard:** - **Point of Contact:** [André Storhaug](mailto:[email protected]) ### Dataset Summary This is a dataset of various language model outputs from different datasets. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@andstor](https://github.com/andstor) for adding this dataset.
andstor/output
[ "task_categories:text-generation", "language:en", "license:mit", "region:us" ]
2023-02-13T10:03:32+00:00
{"language": ["en"], "license": "mit", "task_categories": ["text-generation"], "dataset_info": [{"config_name": "gpt2-xl", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "andstor.the_pile_github.greedy", "num_bytes": 60221138, "num_examples": 22169}], "download_size": 66419674, "dataset_size": 60221138}, {"config_name": "EleutherAI.gpt-j-6B", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "andstor.the_pile_github.greedy", "num_bytes": 67625587, "num_examples": 20665}], "download_size": 73049509, "dataset_size": 67625587}, {"config_name": "NinedayWang.PolyCoder-2.7B", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "andstor.the_pile_github.greedy", "num_bytes": 58822858, "num_examples": 20342}], "download_size": 63717236, "dataset_size": 58822858}, {"config_name": "Salesforce.codegen-16B-multi", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "THUDM.humaneval_x.greedy", "num_bytes": 2509745, "num_examples": 820}], "download_size": 2694784, "dataset_size": 2509745}, {"config_name": "openai.gpt-3.5-turbo-0613", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "THUDM.humaneval_x.greedy", "num_bytes": 958178, "num_examples": 820}], "download_size": 1067958, "dataset_size": 958178}, {"config_name": "openai.gpt-4-0613", "features": [{"name": "id", "dtype": "string"}, {"name": "part", "sequence": "int32"}, {"name": "prompt", "dtype": "string"}, {"name": "reference", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ended", "dtype": "bool"}, {"name": "meta", "struct": [{"name": "subset", "dtype": "string"}]}], "splits": [{"name": "THUDM.humaneval_x.greedy", "num_bytes": 875401, "num_examples": 820}, {"name": "THUDM.humaneval_x.random", "num_bytes": 906274, "num_examples": 820}], "download_size": 1995455, "dataset_size": 1781675}]}
2023-07-09T13:22:59+00:00
3ec568ef46a1a8695aa951671e6dac6b8fc5a449
# Dataset Card for "doguri" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
leeminxji/doguri
[ "region:us" ]
2023-02-13T10:24:07+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 211325.0, "num_examples": 32}], "download_size": 212377, "dataset_size": 211325.0}}
2023-02-14T02:42:32+00:00
ed141a5e77592e712c3f2a35c20895a23eda2e8b
RVMadhu/distil_bert
[ "task_categories:text-classification", "language:en", "license:other", "region:us" ]
2023-02-13T10:46:46+00:00
{"language": ["en"], "license": "other", "task_categories": ["text-classification"]}
2023-06-14T05:29:29+00:00
6482a3f22172deb6b75c15f8520c9ced9905268b
samreen/Urwiki_PR
[ "license:cc-by-nc-nd-3.0", "region:us" ]
2023-02-13T10:51:29+00:00
{"license": "cc-by-nc-nd-3.0"}
2023-02-13T10:51:29+00:00
05728aa81ddf264926211726ca7f16a2db7f0731
# `voc_superpixels_edge_wt_only_coord_10` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_only_coord_10
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T11:12:15+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-02-13T13:09:21+00:00
e8424a3f5c1233b821ce5967f34a69a7566694a0
# `voc_superpixels_edge_wt_only_coord_30` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_only_coord_30
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T11:27:27+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-02-13T13:15:58+00:00
145c9f04fa3e3ef458558546a127a9a97907ed60
## Dataset Description A subset of [the-stack](https://huggingface.co/datasets/bigcode/the-stack) dataset, from 87 programming languages, and 295 extensions. Each language is in a separate folder under `data/` and contains folders of its extensions. We select samples from 20,000 random files of the original dataset, and keep a maximum of 1,000 files per extension. Check this [space](https://huggingface.co/spaces/bigcode/the-stack-inspection) for inspecting this dataset. ## Languages The dataset contains 87 programming languages: ```` 'ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison', 'bluespec', 'c', 'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda', 'dart', 'dockerfile', 'elixir', 'elm', 'emacs-lisp','erlang', 'f-sharp', 'fortran', 'glsl', 'go', 'groovy', 'haskell','html', 'idris', 'isabelle', 'java', 'java-server-pages', 'javascript', 'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell', 'lua', 'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php', 'powershell', 'prolog', 'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown', 'ruby', 'rust', 'sas', 'scala', 'scheme', 'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan', 'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex', 'thrift', 'typescript', 'verilog', 'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig' ````` ## Dataset Structure You can specify which language and extension you want to load: ```python # to load py extension of python from datasets import load_dataset load_dataset("bigcode/the-stack-inspection-data", data_dir="data/python/py") DatasetDict({ train: Dataset({ features: ['content', 'lang', 'size', 'ext', 'max_stars_count', 'avg_line_length', 'max_line_length', 'alphanum_fraction'], num_rows: 1000 }) }) ```
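As a usage sketch, the per-file features listed in the schema above can be used to filter a slice client-side (assuming the `data/python/py` config shown above; the star and size thresholds are arbitrary):

```python
from datasets import load_dataset

# Load the Python/.py slice and keep only short, relatively popular files.
ds = load_dataset("bigcode/the-stack-inspection-data", data_dir="data/python/py", split="train")
popular_short = ds.filter(
    lambda ex: ex["max_stars_count"] is not None
    and ex["max_stars_count"] >= 10
    and ex["size"] < 5_000
)
print(len(popular_short), "files kept out of", len(ds))
```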
bigcode/the-stack-inspection-data
[ "task_categories:text-generation", "task_ids:language-modeling", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:unknown", "language:code", "region:us" ]
2023-02-13T11:49:13+00:00
{"annotations_creators": [], "language_creators": ["crowdsourced"], "language": ["code"], "multilinguality": ["multilingual"], "size_categories": ["unknown"], "source_datasets": [], "task_categories": ["text-generation"], "task_ids": ["language-modeling"]}
2023-02-13T19:26:43+00:00
6a9922e2caa328214a5d5da6d186fcfc766f95bd
# `voc_superpixels_edge_wt_coord_feat_10` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_coord_feat_10
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T11:55:48+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-03-01T10:34:24+00:00
4edf8008ba3f31593fe0b2970b0ab33867e9a16f
# Dataset Card for "sst2-project-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Zombely/sst2-project-dataset
[ "region:us" ]
2023-02-13T11:58:57+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int32"}, {"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "positive"}}}}], "splits": [{"name": "train", "num_bytes": 4570382.866070766, "num_examples": 65749}, {"name": "test", "num_bytes": 106252, "num_examples": 872}, {"name": "validation", "num_bytes": 111220.13392923429, "num_examples": 1600}], "download_size": 3178339, "dataset_size": 4787855.0}}
2023-02-13T11:59:06+00:00
a2e5d63bc8148e1746580080dbf7f3aa09679d80
# `voc_superpixels_edge_wt_coord_feat_30` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_coord_feat_30
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T12:08:46+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-02-13T12:13:11+00:00
e39135bfa15045afb4dcfc4ac8f741e9e6d7248f
# `voc_superpixels_edge_wt_region_boundary_10` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_region_boundary_10
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T12:15:28+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-02-13T12:26:01+00:00
c81e391e38e4c11de88b004c4a0445387fe00dc9
# `voc_superpixels_edge_wt_region_boundary_30` ### Dataset Summary | Dataset | Domain | Task | Node Feat. (dim) | Edge Feat. (dim) | Perf. Metric | |---|---|---|---|---|---| | PascalVOC-SP| Computer Vision | Node Prediction | Pixel + Coord (14) | Edge Weight (1 or 2) | macro F1 | | Dataset | # Graphs | # Nodes | μ Nodes | μ Deg. | # Edges | μ Edges | μ Short. Path | μ Diameter |---|---:|---:|---:|:---:|---:|---:|---:|---:| | PascalVOC-SP| 11,355 | 5,443,545 | 479.40 | 5.65 | 30,777,444 | 2,710.48 | 10.74±0.51 | 27.62±2.13 | ## Additional Information ### Dataset Curators * Vijay Prakash Dwivedi ([vijaydwivedi75](https://github.com/vijaydwivedi75)) ### Licensing Information [Custom License](http://host.robots.ox.ac.uk/pascal/VOC/voc2011/index.html) for Pascal VOC 2011 (respecting Flickr terms of use) ### Citation Information ``` @article{dwivedi2022LRGB, title={Long Range Graph Benchmark}, author={Dwivedi, Vijay Prakash and Rampášek, Ladislav and Galkin, Mikhail and Parviz, Ali and Wolf, Guy and Luu, Anh Tuan and Beaini, Dominique}, journal={arXiv:2206.08164}, year={2022} } ```
LRGB/voc_superpixels_edge_wt_region_boundary_30
[ "task_categories:graph-ml", "size_categories:1M<n<10M", "lrgb", "region:us" ]
2023-02-13T12:28:28+00:00
{"size_categories": ["1M<n<10M"], "task_categories": ["graph-ml"], "tags": ["lrgb"]}
2023-02-13T12:37:10+00:00
064486a15c7f1aa55d2ed9593c11e3afab2d223c
# Dataset Card for "historical_texts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nadav/historical_texts
[ "region:us" ]
2023-02-13T12:42:44+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "file", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6181333245, "num_examples": 40579}, {"name": "test", "num_bytes": 313259202, "num_examples": 2135}], "download_size": 3761925437, "dataset_size": 6494592447}}
2023-02-13T12:46:56+00:00
4040189d19e7f9dac5b2e4134afc968d765d500a
# Dataset Card for "sq-babi_nli_basic-deduction" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
niv-al/sq-babi_nli_basic-deduction
[ "language:sq", "region:us" ]
2023-02-13T13:58:07+00:00
{"language": ["sq"], "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "not-entailed", "1": "entailed"}}}}], "splits": [{"name": "train", "num_bytes": 259042, "num_examples": 1000}, {"name": "validation", "num_bytes": 36917, "num_examples": 144}, {"name": "test", "num_bytes": 37063, "num_examples": 144}], "download_size": 29535, "dataset_size": 333022}}
2023-02-18T19:59:29+00:00
af6d481296045ae333a6baca271e30742d2013aa
# Dataset Card for MNBVC ## Table of Contents - [Dataset Card for MNBVC](#dataset-card-for-mnbvc) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Introduction](#dataset-introduction) - [Data Subsets](#data-subsets) - [Data Formats](#data-formats) - [Text Data](#text-data) - [Q&A Data](#qa-data) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://mnbvc.253874.net/ - **Repository:** https://github.com/esbatmop/MNBVC - **Paper:** N/A - **Leaderboard:** N/A - **Point of Contact:** N/A ### Dataset Introduction On 2023-01-01, the Liwu (里屋) community, the oldest and most mysterious community on the Chinese internet (bar none), solemnly announced that, under the leadership of its wise and mighty moderators, it is determined to play to the community's strengths and help the open-source community maintain, over the long term, the largest corpus of Chinese internet text. The MNBVC dataset on Huggingface is updated incrementally; please visit [https://github.com/esbatmop/MNBVC](https://github.com/esbatmop/MNBVC) for additional data that has not yet been cleaned. It can be loaded with the following script: ```python from datasets import load_dataset dataset = load_dataset("liwu/MNBVC", 'law_judgement', split='train', streaming=True) next(iter(dataset)) # get the first line ``` ## Data Subsets The MNBVC dataset contains several subsets: - `law_judgement`: texts from legal judgment documents. - `gov_xuexiqiangguo`: texts from Xuexi Qiangguo (学习强国). - `gov_report`: texts from government work reports. - `co_ann_report`: corporate annual report texts. - `code_metadata`: code metadata. - `qa_zhihu`: Q&A data from Zhihu. - `qa_wikihow`: Q&A data from wikiHow. - `qa_mfa`: Q&A data from the Ministry of Foreign Affairs. - `news_peoples_daily`: texts from People's Daily. - `wikipedia`: texts from Wikipedia. - `qa_stackexchange`: Q&A data from StackExchange. - `qa_chatgpt`: a Q&A corpus constructed with ChatGPT; thanks to [genggui001](https://github.com/genggui001) for contributing this corpus. - `math_qa`: Q&A data related to mathematics. - `math_chat`: dialogue data related to mathematics, which can improve a model's chain-of-thought ability. - `crawler_oscar`: general text data cleaned from CommonCrawl. ## Data Formats The MNBVC dataset currently contains the following types of data: - general text - Q&A corpora - code corpora - multi-turn dialogue - forum corpora - parallel corpora The concrete format of each type is documented on the [MNBVC wiki page](https://wiki.mnbvc.org/doku.php/%E7%8E%B0%E6%9C%89%E8%AF%AD%E6%96%99%E6%A0%BC%E5%BC%8F). Data uploaded in the early stages of the project uses the format below; this format will be deprecated and the corresponding data will be re-uploaded: ```json { "text": datasets.Value("string"), "meta": datasets.Value("string") } ``` ### Contributions Thanks to the [Liwu community](http://mnbvc.253874.net/) for constructing this dataset. Thanks to [silver](https://github.com/silverriver) and [jiaming](https://huggingface.co/Yjiaming) for adding and uploading this dataset to Huggingface.
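Any of the subsets listed above can be sampled the same way; a small streaming sketch (the subset name is one of those listed above, but record schemas vary per subset, so inspect before relying on any field):

```python
import itertools
from datasets import load_dataset

# Stream a few records from another subset without downloading everything.
dataset = load_dataset("liwu/MNBVC", "qa_zhihu", split="train", streaming=True)
for example in itertools.islice(dataset, 3):
    print(example)
```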
liwu/MNBVC
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:other", "language_creators:other", "multilinguality:monolingual", "size_categories:unknown", "source_datasets:original", "language:zh", "license:mit", "region:us" ]
2023-02-13T14:00:47+00:00
{"annotations_creators": ["other"], "language_creators": ["other"], "language": ["zh"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["unknown"], "source_datasets": ["original"], "task_categories": ["text-generation", "fill-mask"], "task_ids": ["language-modeling", "masked-language-modeling"], "pretty_name": "MNBVC"}
2024-01-13T02:47:24+00:00
1cb3072ca2e972f46a1f63d1ff2c981c5fb5c75e
# Dataset Card for "contract" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
skang187/contract
[ "region:us" ]
2023-02-13T14:25:27+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "clean_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2618442, "num_examples": 7982}, {"name": "val", "num_bytes": 323947, "num_examples": 994}], "download_size": 985710, "dataset_size": 2942389}}
2023-02-13T14:25:46+00:00
3bfd79e598eaab49b8b5ec7397d5c12b0f22416a
# Russian StackOverflow dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Description](#description) - [Usage](#usage) - [Data Instances](#data-instances) - [Source Data](#source-data) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Licensing Information](#licensing-information) ## Description **Summary:** Dataset of questions, answers, and comments from [ru.stackoverflow.com](https://ru.stackoverflow.com/). **Script:** [create_stackoverflow.py](https://github.com/IlyaGusev/rulm/blob/hf/data_processing/create_stackoverflow.py) **Point of Contact:** [Ilya Gusev](mailto:[email protected]) **Languages:** The dataset is in Russian with some programming code. ## Usage Prerequisites: ```bash pip install datasets zstandard jsonlines pysimdjson ``` Loading: ```python from datasets import load_dataset dataset = load_dataset('IlyaGusev/ru_stackoverflow', split="train") for example in dataset: print(example["text_markdown"]) print() ``` ## Data Instances ``` { "question_id": 11235, "answer_count": 1, "url": "https://ru.stackoverflow.com/questions/11235", "score": 2, "tags": ["c++", "сериализация"], "title": "Извлечение из файла, запись в файл", "views": 1309, "author": "...", "timestamp": 1303205289, "text_html": "...", "text_markdown": "...", "comments": { "text": ["...", "..."], "author": ["...", "..."], "comment_id": [11236, 11237], "score": [0, 0], "timestamp": [1303205411, 1303205678] }, "answers": { "answer_id": [11243, 11245], "timestamp": [1303207791, 1303207792], "is_accepted": [1, 0], "text_html": ["...", "..."], "text_markdown": ["...", "..."], "score": [3, 0], "author": ["...", "..."], "comments": { "text": ["...", "..."], "author": ["...", "..."], "comment_id": [11246, 11249], "score": [0, 0], "timestamp": [1303207961, 1303207800] } } } ``` You can use this little helper to unflatten sequences: ```python def revert_flattening(records): fixed_records = [] for key, values in records.items(): if not fixed_records: fixed_records = [{} for _ in range(len(values))] for i, value in enumerate(values): fixed_records[i][key] = value return fixed_records ``` The original JSONL is already unflattened. ## Source Data * The data source is the [Russian StackOverflow](https://ru.stackoverflow.com/) website. * Original XMLs: [ru.stackoverflow.com.7z](https://ia600107.us.archive.org/27/items/stackexchange/ru.stackoverflow.com.7z). * Processing script is [here](https://github.com/IlyaGusev/rulm/blob/hf/data_processing/create_stackoverflow.py). ## Personal and Sensitive Information The dataset is not anonymized, so individuals' names can be found in the dataset. Information about the original authors is included in the dataset where possible. ## Licensing Information According to the license of original data, this dataset is distributed under [CC BY-SA 2.5](https://creativecommons.org/licenses/by-sa/2.5/).
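For instance, the helper above can be applied to the nested sequences of a loaded example (a small usage sketch; `dataset` and `revert_flattening` are as defined in the snippets above):

```python
# Unflatten the nested comment/answer sequences of one example.
example = next(iter(dataset))
comments = revert_flattening(example["comments"])
answers = revert_flattening(example["answers"])
for answer in answers:
    # Each answer carries its own flattened comment block; unflatten it too.
    answer["comments"] = revert_flattening(answer["comments"])
print(len(comments), "question comments,", len(answers), "answers")
```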
IlyaGusev/ru_stackoverflow
[ "task_categories:text-generation", "task_categories:question-answering", "size_categories:100K<n<1M", "language:ru", "license:other", "region:us" ]
2023-02-13T14:32:35+00:00
{"language": ["ru"], "license": "other", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "question-answering"], "dataset_info": {"features": [{"name": "question_id", "dtype": "uint32"}, {"name": "url", "dtype": "string"}, {"name": "answer_count", "dtype": "uint32"}, {"name": "text_html", "dtype": "string"}, {"name": "text_markdown", "dtype": "string"}, {"name": "score", "dtype": "int32"}, {"name": "title", "dtype": "string"}, {"name": "tags", "sequence": "string"}, {"name": "views", "dtype": "uint64"}, {"name": "author", "dtype": "string"}, {"name": "timestamp", "dtype": "uint64"}, {"name": "comments", "sequence": [{"name": "text", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "comment_id", "dtype": "uint32"}, {"name": "score", "dtype": "int32"}, {"name": "timestamp", "dtype": "uint64"}]}, {"name": "answers", "sequence": [{"name": "answer_id", "dtype": "uint32"}, {"name": "is_accepted", "dtype": "uint8"}, {"name": "text_html", "dtype": "string"}, {"name": "text_markdown", "dtype": "string"}, {"name": "score", "dtype": "int32"}, {"name": "author", "dtype": "string"}, {"name": "timestamp", "dtype": "uint64"}, {"name": "comments", "sequence": [{"name": "text", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "comment_id", "dtype": "uint32"}, {"name": "score", "dtype": "int32"}, {"name": "timestamp", "dtype": "uint64"}]}]}], "splits": [{"name": "train", "num_bytes": 3013377174, "num_examples": 437604}], "download_size": 670468664, "dataset_size": 3013377174}}
2023-03-09T23:48:16+00:00
644407bfeb0ae4f3068b69d16b96fbfabb1784cb
# Dataset Card for "sq-babi_nli_lists-sets" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
niv-al/sq-babi_nli_lists-sets
[ "language:sq", "region:us" ]
2023-02-13T14:34:55+00:00
{"language": ["sq"], "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "not-entailed", "1": "entailed"}}}}], "splits": [{"name": "train", "num_bytes": 242554, "num_examples": 1000}, {"name": "validation", "num_bytes": 34848, "num_examples": 144}, {"name": "test", "num_bytes": 34315, "num_examples": 144}], "download_size": 62573, "dataset_size": 311717}}
2023-02-18T19:59:36+00:00
0f95276a5e81beb4f4d958d50b7ba0bedb18cd43
# Lora - Asbestos_Ceiling ## Dataset Description - **Original post:** [Sharing and usage guide for the suspiciously familiar asbestos ceiling LoRA](https://arca.live/b/aiart/69669397) LoRA file for asbestos **ceilings** ## !!Usage!! If you just apply the LoRA in plain text-to-image, it spills over onto the walls and the hit rate drops sharply; inpaint only the ceiling area for a much better hit rate. **Denoising strength: 0.5** **<lora:Asbestos Ceiling:2.0>** [Download](https://huggingface.co/datasets/AIARTCHAN/lora-Asbestos_Ceiling/resolve/main/Asbestos%20Ceiling.safetensors)
AIARTCHAN/lora-Asbestos_Ceiling
[ "license:creativeml-openrail-m", "lora", "aiartchan", "stable-diffusion", "region:us" ]
2023-02-13T14:40:01+00:00
{"license": "creativeml-openrail-m", "tags": ["lora", "aiartchan", "stable-diffusion"]}
2023-02-13T14:43:24+00:00
87dd5a011e7fb7c02b0213f76e21b0c9fe83f23f
# Dataset Card for "sq-babi_nli_single-supporting-fact" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
niv-al/sq-babi_nli_single-supporting-fact
[ "language:sq", "region:us" ]
2023-02-13T15:05:38+00:00
{"language": ["sq"], "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "not-entailed", "1": "entailed"}}}}], "splits": [{"name": "train", "num_bytes": 214663, "num_examples": 1000}, {"name": "validation", "num_bytes": 31319, "num_examples": 144}, {"name": "test", "num_bytes": 30966, "num_examples": 144}], "download_size": 50131, "dataset_size": 276948}}
2023-02-18T19:59:43+00:00