| column | type | min length | max length |
|---------------|--------|-----------:|-----------:|
| sha | string | 40 | 40 |
| text | string | 0 | 13.4M |
| id | string | 2 | 117 |
| tags | list | | |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 31.7M |
| last_modified | string | 25 | 25 |
bde30402c3235395d44663789ce74789fb39cec6
# Dataset Card for "chunk_152" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_152
[ "region:us" ]
2023-04-10T08:58:18+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 16893402480.375, "num_examples": 175885}], "download_size": 15372378382, "dataset_size": 16893402480.375}}
2023-04-10T09:08:26+00:00
c497dbf55dfbbabf7b1acf289e96033e96f6a298
# Dataset Card for "chunk_154" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_154
[ "region:us" ]
2023-04-10T08:58:32+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21954075552.25, "num_examples": 228574}], "download_size": 19501673111, "dataset_size": 21954075552.25}}
2023-04-10T09:17:40+00:00
dc0782bcbd9ddf5d981f715cacc12645184b499f
# Dataset Card for "tdtunlplab_news_summary_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nlplabtdtu/summarization_sft
[ "region:us" ]
2023-04-10T09:08:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "content", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3804093, "num_examples": 1000}, {"name": "test", "num_bytes": 770548, "num_examples": 200}], "download_size": 2233195, "dataset_size": 4574641}}
2023-04-10T09:09:01+00:00
dde832b2cfa1683fbc21ccebb21b4dfc6a272e05
# Dataset Card for "tdtunlplab_news_summary_2_prompt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nlplabtdtu/summarization_sft_prompted
[ "language:vi", "region:us" ]
2023-04-10T09:13:30+00:00
{"language": "vi", "dataset_info": {"features": [{"name": "summary", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3857903, "num_examples": 1000}, {"name": "test", "num_bytes": 781238, "num_examples": 200}], "download_size": 2286819, "dataset_size": 4639141}}
2023-07-15T08:20:27+00:00
257715a33eb2428dccffd96318159c64c6d620a5
# Dataset Card for "chunk_158" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_158
[ "region:us" ]
2023-04-10T09:20:28+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 19174926672.125, "num_examples": 199639}], "download_size": 17127280603, "dataset_size": 19174926672.125}}
2023-04-10T09:35:59+00:00
079f0dd881d4181a211ca5961a90ff0d9a7c5a58
# Dataset Card for "chunk_155" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_155
[ "region:us" ]
2023-04-10T09:21:05+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 24901308432.625, "num_examples": 259259}], "download_size": 22581975960, "dataset_size": 24901308432.625}}
2023-04-10T09:45:12+00:00
d9264982b753ea263fc213d482e1ba78d5bc7e68
# Dataset Card for "chunk_156" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_156
[ "region:us" ]
2023-04-10T09:41:16+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22243660272.375, "num_examples": 231589}], "download_size": 19717789179, "dataset_size": 22243660272.375}}
2023-04-10T10:12:20+00:00
5d2bf670fa6b1e7db9e5bec1dbeaa54fb3e2123a
# Dataset Card for "celloscope_28000_bangla_ner_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celloscopeai/celloscope_28000_bangla_ner_dataset
[ "region:us" ]
2023-04-10T09:51:22+00:00
{"dataset_info": {"features": [{"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 4406091, "num_examples": 22052}, {"name": "validation", "num_bytes": 1118019, "num_examples": 2756}, {"name": "test", "num_bytes": 1101591, "num_examples": 2758}], "download_size": 975274, "dataset_size": 6625701}}
2023-04-10T10:07:28+00:00
744c9efdc6457a117fcaa288e23dc75bdc978982
# GPTeacher

A collection of modular datasets generated by GPT-4: General-Instruct, Roleplay-Instruct, Code-Instruct, and Toolformer.

The General-Instruct dataset used many of the same seed prompts as Alpaca, but also had specific examples of things we didn't see much of with Alpaca, such as chain-of-thought reasoning, logic puzzles, wordplay, and (light) role playing, and GPT-4 was asked to include the reasoning and thought steps behind example responses where appropriate, among other things. The General-Instruct dataset is about 20,000 examples with just deduplication. The codegen-instruct dataset is still being cleaned and will be up when it's done.

Each dataset is split into 5 separate datasets, based on similarity-scored cleaning: a simple-dedupe-only set, plus a range of <60% to <90% similarity-cleaned sets for each. They are all made to be compliant with Alpaca's dataset format, i.e. each has an instruction, input, and output field, which should make it easier to use the same fine-tuning script and process as Alpaca.

Documentation on the Toolformer section is coming soon; we generated a dataset to use a set of predefined tools, including search, python, terminal/shell, wikipedia, wolfram, and others. More info on the prompt format for inference soon.
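Since all splits follow Alpaca's instruction/input/output format, a single record looks roughly like this (a minimal sketch; the field contents are illustrative placeholders, not actual GPTeacher examples):

```python
import json

# Shape of an Alpaca-compatible record; the values below are made up
# for illustration and are not taken from GPTeacher.
record = {
    "instruction": "Solve the following riddle and explain your reasoning step by step.",
    "input": "What has keys but cannot open locks?",
    "output": "A piano. Reasoning: a piano has keys, but they play notes instead of opening locks.",
}
print(json.dumps(record, indent=2))
```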
MentalFox/GPTeacher
[ "region:us" ]
2023-04-10T10:11:17+00:00
{}
2023-04-10T10:12:29+00:00
cc689b69439ceab9596fe2a864151ad7ab78bbeb
AbdulBasitTahir/FiveVolumeCommentaryOfTheHolyQuran
[ "license:afl-3.0", "region:us" ]
2023-04-10T11:15:19+00:00
{"license": "afl-3.0"}
2023-04-10T11:17:27+00:00
ea73501b45cc19be7729401fa6ce70f37461e206
letinnghia/student-feedbacks
[ "license:gpl-2.0", "region:us" ]
2023-04-10T11:32:06+00:00
{"license": "gpl-2.0"}
2023-04-10T12:01:27+00:00
7ededdd77e1e033db2f913a4933f6b92bffe6009
# Dataset Card for "chunk_157" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_157
[ "region:us" ]
2023-04-10T11:42:33+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 17014134816.25, "num_examples": 177142}], "download_size": 15023435045, "dataset_size": 17014134816.25}}
2023-04-10T12:12:31+00:00
0cf177102e66168515d392673a0aa459cd3011a9
AyoubChLin/CNN_News_Articles_clean
[ "license:apache-2.0", "region:us" ]
2023-04-10T11:51:50+00:00
{"license": "apache-2.0"}
2023-04-10T11:57:26+00:00
b51f29286094a4a933b7308955aa656e4d95a404
# Distil Whisper: AMI IHM

This is a variant of the [AMI IHM](https://huggingface.co/datasets/edinburghcstr/ami) dataset, augmented to return the pseudo-labelled Whisper Transcriptions alongside the original dataset elements. The pseudo-labelled transcriptions were generated by labelling the input audio data with the Whisper [large-v2](https://huggingface.co/openai/whisper-large-v2) model with *greedy* sampling. For information on how the original dataset was curated, refer to the original [dataset card](https://huggingface.co/datasets/edinburghcstr/ami).

## Standalone Usage

First, install the latest version of the 🤗 Datasets package:

```bash
pip install --upgrade pip
pip install --upgrade datasets[audio]
```

The dataset can be downloaded and pre-processed on disk using the [`load_dataset`](https://huggingface.co/docs/datasets/v2.14.5/en/package_reference/loading_methods#datasets.load_dataset) function:

```python
from datasets import load_dataset

dataset = load_dataset("distil-whisper/ami-ihm", "ihm")

# take the first sample of the validation set
sample = dataset["validation"][0]
```

It can also be streamed directly from the Hub using Datasets' [streaming mode](https://huggingface.co/blog/audio-datasets#streaming-mode-the-silver-bullet). Loading a dataset in streaming mode loads individual samples of the dataset at a time, rather than downloading the entire dataset to disk:

```python
from datasets import load_dataset

dataset = load_dataset("distil-whisper/ami-ihm", "ihm", streaming=True)

# take the first sample of the validation set
sample = next(iter(dataset["validation"]))
```

## Distil Whisper Usage

To use this dataset to reproduce a Distil Whisper training run, refer to the instructions on the [Distil Whisper repository](https://github.com/huggingface/distil-whisper#training).

## License

This dataset is licensed under cc-by-4.0.
distil-whisper/ami-ihm
[ "task_categories:automatic-speech-recognition", "language:en", "license:cc-by-4.0", "region:us" ]
2023-04-10T11:57:58+00:00
{"language": ["en"], "license": "cc-by-4.0", "task_categories": ["automatic-speech-recognition"], "-pretty_name": "AMI IHM"}
2023-09-25T09:30:14+00:00
dcbf6940d28138abf378fffa6f70db7e6aa0150b
# Dataset Card for "chunk_160" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_160
[ "region:us" ]
2023-04-10T12:14:34+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22251248064.5, "num_examples": 231668}], "download_size": 20138689166, "dataset_size": 22251248064.5}}
2023-04-10T12:28:16+00:00
2eeac425ea7319442a606cf5a1e487c30b2fb658
# Dataset Card for "chunk_161" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_161
[ "region:us" ]
2023-04-10T12:25:43+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21705503328.75, "num_examples": 225986}], "download_size": 19854151285, "dataset_size": 21705503328.75}}
2023-04-10T12:40:11+00:00
7f28d746a9212691c940ff779b985b15f542228f
# Dataset Card for Quora Chat Dutch

## Dataset Description

- **Homepage:** N/A
- **Repository:** N/A
- **Paper:** N/A
- **Leaderboard:** N/A
- **Point of Contact:** Bram Vanroy

### Dataset Summary

**Update 15/1/2024**: the format of this dataset has been updated so that it now follows the `messages` format, where each conversation is a list of messages (dictionaries) and each dictionary has a role (user, assistant or system) and content key. Importantly, this dataset was also filtered by its language identification tag. We ran LID with [FastText](https://huggingface.co/facebook/fasttext-language-identification) (results are in the separate `3-lid` branch) and filtered on Dutch only. Therefore, the current version contains fewer samples than the original version. Note that this may mean that translation-related tasks or even code questions have been filtered out! Finally, we have made a train/test split (`train_sft` and `test_sft`), following the approach by Hugging Face in e.g. [UltraChat 200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k). The original version of the dataset is available in branch `1-original`.

This dataset contains 48,761 (43,919 train, 4,842 test) conversations between an AI assistant and a (fake, generated) "Human", in Dutch. They are translations of [Baize](https://github.com/project-baize/baize-chatbot/tree/6790946f638d60fcaf397574189124f15792f35a/data)'s machine-generated answers to the [Quora](https://huggingface.co/datasets/quora) dataset.

☕ [**Want to help me out?**](https://www.buymeacoffee.com/bramvanroy) Translating the data with the OpenAI API, and prompt testing, cost me 💸$135.65💸. If you like this dataset, please consider [buying me a coffee](https://www.buymeacoffee.com/bramvanroy) to offset a portion of this cost, I appreciate it a lot! ☕

If you use this dataset or refer to it, please use the following citation:

Vanroy, B. (2023). *Language Resources for Dutch Large Language Modelling*. [https://arxiv.org/abs/2312.12852](https://arxiv.org/abs/2312.12852)

```bibtex
@article{vanroy2023language,
    title={Language Resources for {Dutch} Large Language Modelling},
    author={Vanroy, Bram},
    journal={arXiv preprint arXiv:2312.12852},
    year={2023}
}
```

### Languages

- Dutch

## Dataset Structure

### Data Instances

```python
{
    'messages': [
        {'content': 'Ik wil weten hoe ik meer upvotes kan krijgen op mijn berichten.', 'role': 'user'},
        {'content': 'Interessante vraag! Heb je geprobeerd om lid te worden van communities of subreddits die gerelateerd zijn aan het onderwerp van je bericht?', 'role': 'assistant'},
        {'content': 'Ja, dat heb ik. Maar mijn berichten krijgen nog steeds niet veel upvotes. Heb je nog andere tips?', 'role': 'user'},
        {'content': 'Een ding dat je kunt doen is het creëren van hoogwaardige inhoud die interessant en relevant is voor het publiek van de subreddit. Bovendien kun je in contact komen met je publiek door te reageren op de berichten van andere mensen en te reageren op opmerkingen op je eigen berichten.', 'role': 'assistant'},
        {'content': 'Dat is een goed punt. Ik heb ook gehoord dat pakkende koppen en goede timing een verschil kunnen maken. Wat denk jij?', 'role': 'user'},
        {'content': 'Ja, een pakkende kop hebben en posten op een moment dat je publiek het meest actief is, kan zeker je kansen vergroten om upvotes te krijgen. Maar uiteindelijk komt het allemaal neer op het creëren van kwalitatieve inhoud die aantrekkelijk is voor je publiek.', 'role': 'assistant'},
        {'content': 'Ik snap het. Bedankt voor het advies! Heb je nog andere tips om de betrokkenheid bij mijn berichten te verbeteren?', 'role': 'user'},
        {'content': "Natuurlijk, je kunt proberen multimedia zoals afbeeldingen of video's te gebruiken om je bericht interessanter te maken, en ook overwegen om samen te werken met andere gebruikers of influencers om je bereik te vergroten. En vergeet niet om je berichten op andere sociale media platforms te promoten om meer kijkers aan te trekken.", 'role': 'assistant'}
    ],
    'prompt': 'Ik wil weten hoe ik meer upvotes kan krijgen op mijn berichten.',
    'prompt_id': 'a79a84a83cc11063aaa63becacdf4aaa91866a265e37818a4a9684014b7c85d9'
}
```

### Data Fields

- **prompt**: the initial user prompt
- **prompt_id**: the unique hash of the prompt
- **messages**: list of messages (dictionaries) where each dictionary has a role (user, assistant, or system) and content

## Dataset Creation

Both the conversations and the topics were translated with OpenAI's API for `gpt-3.5-turbo`, with `max_tokens=1024` and `temperature=0` as parameters.

The prompt template to translate the input is:

```python
CONVERSATION_TRANSLATION_PROMPT = """You are asked to translate a conversation between an AI assistant and a human from {src_lang} into {tgt_lang}.

Here are the requirements that you should adhere to:
1. maintain the format: the conversation consists of the AI (marked as `[|AI|]`) and the human (`[|Human|]`) talking in turns and responding to each other;
2. do not translate the speaker identifiers `[|AI|]` and `[|Human|]` but always copy them into the translation in appropriate places;
3. ensure accurate translation and keep the correctness of the conversation;
4. make sure that text is fluent to read and does not contain grammatical errors. Use standard {tgt_lang} without regional bias;
5. translate the human's text using informal, but standard, language;
6. make sure to avoid biases (such as gender bias, grammatical bias, social bias);
7. if the human asks to correct grammar mistakes or spelling mistakes then you have to generate a similar mistake in {tgt_lang}, and then also generate a corrected output version for the AI in {tgt_lang};
8. if the human asks to translate text from one to another language, then you only translate the human's question to {tgt_lang} but you keep the translation that the AI provides in the language that the human requested;
9. do not translate code fragments but copy them as they are. If there are English examples, variable names or definitions in code fragments, keep them in English.

Now translate the following conversation with the requirements set out above. Do not provide an explanation and do not add anything else.\n\n"""
```

The prompt to translate the topic is:

```python
TOPIC_TRANSLATION_PROMPT = "Translate the following title of a conversation from {src_lang} to {tgt_lang} in a succinct," \
                           " summarizing manner. Translate accurately and formally. Do not provide any explanation" \
                           " about the translation and do not include the original title.\n\n"
```

The system message was:

```
You are a helpful assistant that translates English to Dutch to the requirements that are given to you.
```

Note that 12 items (0.02%) were not successfully translated. The translation was missing the AI identifier `[|AI|]` and/or the human one `[|Human|]`. The IDs for the missing items are `[1352, 14599, 25985, 27167, 28254, 33731, 41550, 44364, 47293, 49060, 51383, 53740]`.
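As a rough illustration, such a translation call could look as follows with the legacy `openai` (<1.0) client. This is a hypothetical sketch, not the author's actual script; `translate_conversation` is an assumed helper, and `CONVERSATION_TRANSLATION_PROMPT` is the template defined above:

```python
import openai  # legacy openai<1.0 client

# Hypothetical helper sketching the translation call described above;
# uses the parameters from the card (gpt-3.5-turbo, max_tokens=1024,
# temperature=0) and the system message quoted above.
def translate_conversation(conversation: str, src_lang: str = "English", tgt_lang: str = "Dutch") -> str:
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=1024,
        temperature=0,
        messages=[
            {"role": "system", "content": (
                "You are a helpful assistant that translates English to Dutch "
                "to the requirements that are given to you."
            )},
            {"role": "user", "content": CONVERSATION_TRANSLATION_PROMPT.format(
                src_lang=src_lang, tgt_lang=tgt_lang) + conversation},
        ],
    )
    return response["choices"][0]["message"]["content"]
```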
### Source Data

#### Initial Data Collection and Normalization

Initial data generation by [Baize](https://github.com/project-baize/baize-chatbot), which in turn relied on [Quora](https://huggingface.co/datasets/quora).

#### Who are the source language producers?

Quora users and OpenAI's GPT models (via [Baize](https://github.com/project-baize/baize-chatbot)).

## Considerations for Using the Data

Note that neither the translations in this new dataset, nor the machine-generated dialogues in the original [Baize](https://github.com/project-baize/baize-chatbot) dataset, have been verified by humans.

### Discussion of Biases

As with any machine-generated texts, users should be aware of potential biases that are included in this dataset. Although the prompt specifically includes `make sure to avoid biases (such as gender bias, grammatical bias, social bias)`, the impact of such a command is of course not known. It is likely that biases remain in the dataset, so use it with caution.

### Other Known Limitations

The translation quality has not been verified. Use at your own risk!

### Licensing Information

This text was generated (either in part or in full) with GPT-3 (`gpt-3.5-turbo`), OpenAI's large-scale language-generation model. Upon generating draft language, the author reviewed, edited, and revised the language to their own liking and takes ultimate responsibility for the content of this publication. Therefore commercial usage is not allowed. If you use this dataset, you must also follow the [Sharing](https://openai.com/policies/sharing-publication-policy) and [Usage](https://openai.com/policies/usage-policies) policies.

### Contributions

Thanks to [Baize](https://github.com/project-baize/baize-chatbot) for the initial machine-generated dialogues, based on [quora](https://huggingface.co/datasets/quora).
BramVanroy/quora-chat-dutch
[ "task_categories:question-answering", "task_categories:text-generation", "size_categories:10K<n<100K", "language:nl", "license:cc-by-nc-4.0", "baize", "quora", "chat", "arxiv:2312.12852", "doi:10.57967/hf/0524", "region:us" ]
2023-04-10T12:27:30+00:00
{"language": ["nl"], "license": "cc-by-nc-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering", "text-generation"], "pretty_name": "Quora Chat Dutch", "tags": ["baize", "quora", "chat"], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train_sft", "num_bytes": 67873239, "num_examples": 43919}, {"name": "test_sft", "num_bytes": 7472429, "num_examples": 4842}], "download_size": 41224462, "dataset_size": 75345668}, "configs": [{"config_name": "default", "data_files": [{"split": "train_sft", "path": "data/train_sft-*"}, {"split": "test_sft", "path": "data/test_sft-*"}]}]}
2024-01-15T11:10:38+00:00
ab6f33b62c8f77b5ff686a6925b34a6a3d9c7655
# Dataset Card for "chunk_162" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_162
[ "region:us" ]
2023-04-10T12:33:19+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21615890544.375, "num_examples": 225053}], "download_size": 19718004346, "dataset_size": 21615890544.375}}
2023-04-10T12:51:48+00:00
14d78454d0ae41df33b52efd9ad705d79948985b
# Dataset Card for "chunk_166" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_166
[ "region:us" ]
2023-04-10T12:53:44+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21963392208.125, "num_examples": 228671}], "download_size": 20654079336, "dataset_size": 21963392208.125}}
2023-04-10T13:15:21+00:00
318520822474c100b4ce6cc9bb932fd59abfc922
mgg/sd_misc
[ "license:mit", "region:us" ]
2023-04-10T13:02:41+00:00
{"license": "mit"}
2023-04-10T13:02:41+00:00
5b739216c7bbdbe0fc35a9c0b6c2e03f952bed81
# Dataset Card for "SheepsCaption" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
GreeneryScenery/SheepsCaption
[ "region:us" ]
2023-04-10T13:21:10+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 736242074.62, "num_examples": 22719}], "download_size": 1321751813, "dataset_size": 736242074.62}}
2023-04-10T13:23:41+00:00
da545e7728a6493ff84147d649e78f73c6a24e96
# Dataset Card for "loginui" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hahalol/loginui
[ "region:us" ]
2023-04-10T13:47:20+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": "string"}, {"name": "description", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7373, "num_examples": 61}], "download_size": 4423, "dataset_size": 7373}}
2023-04-10T16:25:47+00:00
06ba16ef51d2ffaaca0ac2fb2f8beb510c84bcee
# Dataset Card for "chunk_168" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_168
[ "region:us" ]
2023-04-10T13:52:03+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 25019927712.25, "num_examples": 260494}], "download_size": 23914640600, "dataset_size": 25019927712.25}}
2023-04-10T14:05:45+00:00
e87855ffdbc9f62b9b3110873d00e5044773166e
# Dataset Card for "chunk_165" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_165
[ "region:us" ]
2023-04-10T13:55:49+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22107656304.375, "num_examples": 230173}], "download_size": 20363526755, "dataset_size": 22107656304.375}}
2023-04-10T14:27:41+00:00
2b42dd33c7a0e137417cbe78513b3dc1ddebc768
# Dataset Card for "0-9up-ft_ensemble_distilled_mfcc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mazkooleg/0-9up_ft_ensemble_distilled_mfcc
[ "region:us" ]
2023-04-10T13:56:13+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": {"class_label": {"names": {"0": "zero", "1": "one", "2": "two", "3": "three", "4": "four", "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine", "10": "#unk#", "11": "#pub#"}}}}, {"name": "logits", "sequence": "float32"}, {"name": "mfcc", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 5630767200, "num_examples": 1095480}, {"name": "validation", "num_bytes": 17311520, "num_examples": 3368}, {"name": "test", "num_bytes": 19393220, "num_examples": 3773}], "download_size": 5800647276, "dataset_size": 5667471940}}
2023-04-10T14:19:28+00:00
b3d05e4f6e6c73a2640c2656a3d1052b1c81960a
# Dataset Card for "SheepsCanny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
GreeneryScenery/SheepsCanny
[ "region:us" ]
2023-04-10T14:01:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "conditioning_image_2", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1507768570.06, "num_examples": 22719}], "download_size": 1290896004, "dataset_size": 1507768570.06}}
2023-04-10T14:06:47+00:00
5596b5ae3de10d5a84ce5051818bdad74e95dfb6
# Dataset Card for "chunk_167" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_167
[ "region:us" ]
2023-04-10T14:08:40+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23174365392.125, "num_examples": 241279}], "download_size": 20849659760, "dataset_size": 23174365392.125}}
2023-04-10T14:41:20+00:00
46ef024a702793ebfb0eb85fadb5bb7313121361
# Dataset Card for "scipapers_1kseq" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
achang/scipapers_1kseq
[ "region:us" ]
2023-04-10T14:09:25+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2375118680, "num_examples": 322961}, {"name": "validation", "num_bytes": 95859057, "num_examples": 13069}], "download_size": 701768640, "dataset_size": 2470977737}}
2023-04-10T14:10:47+00:00
322394276729c6a24af07d70ccc7c883c5732593
# Dataset Card for "JustSumAI_cleaned_gpt2_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dragonoverlord3000/JustSumAI_cleaned_gpt2_data
[ "region:us" ]
2023-04-10T14:17:50+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2598153279, "num_examples": 30256635}, {"name": "validation", "num_bytes": 1996, "num_examples": 21}], "download_size": 712993372, "dataset_size": 2598155275}}
2023-06-05T16:50:47+00:00
79d5df9e8d719fcdb8195eabfe81fba349d5fd73
AyoubChLin/Bertopic_result_cnn_news_train
[ "license:apache-2.0", "region:us" ]
2023-04-10T14:28:06+00:00
{"license": "apache-2.0"}
2023-04-10T15:07:59+00:00
d5c299f1d8cb82c71c68b2db2911d77659ff3a12
ms903/visinger
[ "license:mit", "region:us" ]
2023-04-10T14:34:55+00:00
{"license": "mit"}
2023-07-15T12:04:45+00:00
4a543be08452ecab32d73d769901db684d8aff00
# Dataset Card for "chunk_159" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_159
[ "region:us" ]
2023-04-10T14:37:31+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23271469920.75, "num_examples": 242290}], "download_size": 20920889459, "dataset_size": 23271469920.75}}
2023-04-10T15:09:54+00:00
af8809cbe9d999a4e3506a21af8de29eff74a3c9
# Multilingual e-SNLI (MLe-SNLI)

In this repo, we provide the training, validation, and testing sets for **M**ulti**l**ingual **e-SNLI** (MLe-SNLI). For more details, find our report [here](https://github.com/rish-16/cs4248-project/blob/main/CS4248_Group19_Final_Report.pdf).

## Dataset details

MLe-SNLI contains 500K training (`train`) samples of premise-hypothesis pairs along with their associated label and explanation. We take 100K training samples from the original e-SNLI (Camburu et al., 2018) dataset and translate them into 4 other languages (Spanish, German, Dutch, and French). We do the same for all 9,824 testing (`test`) and validation (`dev`) samples, giving us 49,120 samples for both the `test` and `dev` splits.

| Column | Description |
|-----------------|---------------------------------------------------------------------------------|
| `premise` | Natural language premise sentence |
| `hypothesis` | Natural language hypothesis sentence |
| `label` | From `entailment`, `contradiction`, or `neutral` |
| `explanation_1` | Natural language justification for `label` |
| `language` | From English (`en`), Spanish (`es`), German (`de`), Dutch (`nl`), French (`fr`) |

> **WARNING:** the translation quality of MLe-SNLI may be compromised for some natural language samples because of quality issues in the original e-SNLI dataset that were not addressed in our [work](https://github.com/rish-16/cs4248-project). Use it at your own discretion.

## Download Instructions

To access MLe-SNLI, you can use the HuggingFace Datasets API to load the dataset:

```python
from datasets import load_dataset

mle_snli = load_dataset("rish16/MLe-SNLI") # loads a DatasetDict object

train_data = mle_snli['train'] # 500K samples (100K per lang)
dev_data = mle_snli['dev']     # 49120 samples (9824 per lang)
test_data = mle_snli['test']   # 49120 samples (9824 per lang)

print(mle_snli)
"""
DatasetDict({
    train: Dataset({
        features: ['premise', 'hypothesis', 'label', 'explanation_1', 'language'],
        num_rows: 500000
    })
    test: Dataset({
        features: ['premise', 'hypothesis', 'label', 'explanation_1', 'language'],
        num_rows: 49120
    })
    validation: Dataset({
        features: ['premise', 'hypothesis', 'label', 'explanation_1', 'language'],
        num_rows: 49120
    })
})
"""
```
rish16/MLe-SNLI
[ "license:mit", "region:us" ]
2023-04-10T14:39:34+00:00
{"license": "mit"}
2023-04-12T04:57:42+00:00
8cca6262528b37b4d74f5fd2e272b578db24cb34
Extends the `TSSB-3M` dataset with more commit info (commit message, source code files, ...).

# Sample

```python
{
  "project": "ngraph",
  "commit_sha": "1445e0684fbcca2ec49a5f1becf1345159b7ba6a",
  "parent_sha": "4eb8eed57e506e8a2745b298340666e9d7e5ce58",
  "file_path": "ngraph/op_graph/op_graph.py",
  "project_url": "https://github.com/rsumner31/ngraph",
  "likely_bug": true,
  "comodified": false,
  "in_function": true,
  "diff": "@@ -787,7 +787,7 @@ def set_item(tensor, item, value):\n sl = slice(sl)\n start, end, step = sl.indices(l)\n if step <= 0:\n- raise ValueError('Invalid slice in item {}'.format(item))\n+ raise ValueError('Invalid slice (negative step) in item {}'.format(item))\n return assign(tensor_slice(tensor, item, axes=value.axes), value)\n \n \n",
  "before": "raise ValueError ( 'Invalid slice in item {}' . format ( item ) )",
  "after": "raise ValueError ( 'Invalid slice (negative step) in item {}' . format ( item ) )",
  "sstub_pattern": "CHANGE_STRING_LITERAL",
  "edit_script": "[[\"Update\", [\"string:'Invalid slice in item {}'\", 3, 30, 3, 56], \"'Invalid slice (negative step) in item {}'\"]]",
  "key": "rsumner31/ngraph@1445e0684fbcca2ec49a5f1becf1345159b7ba6a",
  "commit_message": "Better error description.",
  "files": [
    {
      "sha": "52e76a0a5acb043db75592be1bdd09fc6fedf932",
      "filename": "ngraph/op_graph/op_graph.py",
      "status": "modified",
      "additions": 1,
      "deletions": 1,
      "changes": 2,
      "blob_url": "https://github.com/rsumner31/ngraph/blob/1445e0684fbcca2ec49a5f1becf1345159b7ba6a/ngraph%2Fop_graph%2Fop_graph.py",
      "raw_url": "https://github.com/rsumner31/ngraph/raw/1445e0684fbcca2ec49a5f1becf1345159b7ba6a/ngraph%2Fop_graph%2Fop_graph.py",
      "contents_url": "https://api.github.com/repos/rsumner31/ngraph/contents/ngraph%2Fop_graph%2Fop_graph.py?ref=1445e0684fbcca2ec49a5f1becf1345159b7ba6a",
      "patch": "@@ -787,7 +787,7 @@ def set_item(tensor, item, value):\n sl = slice(sl)\n start, end, step = sl.indices(l)\n if step <= 0:\n- raise ValueError('Invalid slice in item {}'.format(item))\n+ raise ValueError('Invalid slice (negative step) in item {}'.format(item))\n return assign(tensor_slice(tensor, item, axes=value.axes), value)\n \n "
    }
  ],
  "find_commit": 1
}
```

# Reference

* [1] Richter, Cedric, and Heike Wehrheim. "TSSB-3M: Mining single statement bugs at massive scale." Proceedings of the 19th International Conference on Mining Software Repositories. 2022.
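A minimal loading sketch, assuming the extended records are stored in a format that `load_dataset` can read directly; adjust to the actual file layout of the repo:

```python
from datasets import load_dataset

# Assumes the repo hosts the extended records in a load_dataset-readable
# format; field names follow the sample record above.
ds = load_dataset("zirui3/TSSB-3M-ext", split="train")
record = ds[0]
print(record["project"], record["commit_sha"])
print(record["commit_message"])
print(record["diff"])
```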
zirui3/TSSB-3M-ext
[ "license:cc-by-4.0", "region:us" ]
2023-04-10T14:58:11+00:00
{"license": "cc-by-4.0"}
2023-05-25T09:22:52+00:00
b7d1ca325a09ee84717c11de2152230d4218e58f
# Dataset Card for "yelp_restaurant_review_labelled" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) # More info about the dataset dataset downloaded from [Yelp](https://www.yelp.com/dataset/download) ### labelling if review star < 3 is 0 (negative)\ else if review star == 3 is 1 (neutral)\ else if review star > 3 is 2 (positive)
mrcaelumn/yelp_restaurant_review_labelled
[ "region:us" ]
2023-04-10T14:58:14+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "neutral", "2": "positive"}}}}], "splits": [{"name": "train", "num_bytes": 2282684498, "num_examples": 4111534}, {"name": "test", "num_bytes": 571038991, "num_examples": 1027884}], "download_size": 0, "dataset_size": 2853723489}}
2023-04-12T05:40:00+00:00
bb028b06a0d04e4beefa5c4d9b1b08628a00b044
# Dataset Card for "chunk_164" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_164
[ "region:us" ]
2023-04-10T15:05:01+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21296819088.625, "num_examples": 221731}], "download_size": 19535011455, "dataset_size": 21296819088.625}}
2023-04-10T15:34:39+00:00
14e127abe1ca7786ea82faa336d0115ca0326b8f
# Dataset Card for "chunk_170" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_170
[ "region:us" ]
2023-04-10T15:20:22+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23363195760.375, "num_examples": 243245}], "download_size": 21013363110, "dataset_size": 23363195760.375}}
2023-04-10T15:35:53+00:00
a3662a1ab411dc1c56c7f7f98d2c45cf3dac2f05
# Dataset Card for "Telugu_ASR_corpus" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eswardivi/Telugu_ASR_corpus
[ "region:us" ]
2023-04-10T15:34:13+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4936808328.336, "num_examples": 7061}, {"name": "test", "num_bytes": 334026532.0, "num_examples": 472}], "download_size": 3683571632, "dataset_size": 5270834860.336}}
2023-04-10T15:46:56+00:00
49ba97d5b5b6c4cfe187eae461184db29000aa3f
# Dataset Card for "chunk_169" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_169
[ "region:us" ]
2023-04-10T15:53:01+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23335822080.0, "num_examples": 242960}], "download_size": 21149772594, "dataset_size": 23335822080.0}}
2023-04-10T16:07:22+00:00
5e9b00304d6ea48529d380e06419bf0f2fe23544
# A dataset of mining engineering generated with ChatGPT & BinGPT

I take the [colorado school of mines - mining engineering syllabus](https://catalog.mines.edu/undergraduate/programs/miningengineering/miningengineering.pdf) as a base.
Yairama/alpaca_miner_dataset
[ "license:gpl-3.0", "region:us" ]
2023-04-10T16:00:22+00:00
{"license": "gpl-3.0"}
2023-04-11T06:05:13+00:00
fba266afdd27d33d639118c87e27855f1cc5a49d
# Usage

```python
from datasets import load_dataset

proposals = load_dataset("gnumanth/tc39", split="train")
```

```python
print(proposals[0])

{
    'url': 'https://github.com/tc39/proposal-array-is-template-object',
    'edition': None,
    'authors': ['Mike Samuel', 'Krzysztof Kotowicz'],
    'champions': ['Krzysztof Kotowicz'],
    'description': 'TC39 proposal to identify template strings',
    'id': 'proposal-array-is-template-object',
    'rationale': None,
    'name': 'Array.isTemplateObject',
    'has-specification': True,
    'notes': [
        {'date': '2019-12-04T00:00:00.000Z', 'url': 'https://github.com/tc39/notes/blob/HEAD/meetings/2019-12/december-4.md#arrayistemplateobject-update'},
        {'date': '2019-12-04T00:00:00.000Z', 'url': 'https://github.com/tc39/notes/blob/HEAD/meetings/2019-12/december-4.md'},
        {'date': '2021-01-25T00:00:00.000Z', 'url': 'https://github.com/tc39/notes/blob/HEAD/meetings/2021-01/jan-25.md'}
    ],
    'pushed_at': '2021-01-25T20:25:40.000Z',
    'stage': 2,
    'tags': ['ECMA-262']
}
```
gnumanth/tc39
[ "license:mit", "region:us" ]
2023-04-10T16:06:06+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "edition", "dtype": "int64"}, {"name": "authors", "sequence": "string"}, {"name": "champions", "sequence": "string"}, {"name": "description", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "rationale", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "has-specification", "dtype": "bool"}, {"name": "notes", "list": [{"name": "date", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "pushed_at", "dtype": "string"}, {"name": "stage", "dtype": "int64"}, {"name": "tags", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 117768.0, "num_examples": 201}, {"name": "test", "num_bytes": 39256.0, "num_examples": 67}], "download_size": 66630, "dataset_size": 157024.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-18T20:27:13+00:00
234648fb8a4df9a17289f5776af09e0c831c1f8b
# Dataset Card for "martin_valen_dataset_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
akadhim-ai/martin_valen_dataset_10
[ "region:us" ]
2023-04-10T16:26:09+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 82775.0, "num_examples": 10}], "download_size": 82229, "dataset_size": 82775.0}}
2023-04-10T16:26:14+00:00
87bd5d9bdd59f857fb6c845bdccc3921f0977d77
# Dataset Card for Regulatory Impact Analysis (RIA) data on Portuguese regulation ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
victorcosta/ria_pt__proems_format
[ "task_categories:text-classification", "multilinguality:monolingual", "language:pt", "region:us" ]
2023-04-10T17:21:45+00:00
{"annotations_creators": "expert-generated", "language": "pt", "multilinguality": "monolingual", "task_categories": ["text-classification"], "pretty_name": "Regulatory Impact Analysis (RIA) data on Portuguese regulation", "configs": ["labeled", "unlabeled"]}
2023-05-10T18:02:29+00:00
f9f26581bc51d2d27129812478675e49420ad4b1
# Dataset Card for "peft-dependents" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
open-source-metrics/peft-dependents
[ "region:us" ]
2023-04-10T17:37:32+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "stars", "dtype": "int64"}, {"name": "forks", "dtype": "int64"}], "splits": [{"name": "package", "num_bytes": 213, "num_examples": 6}, {"name": "repository", "num_bytes": 3509, "num_examples": 83}], "download_size": 6133, "dataset_size": 3722}}
2024-02-16T18:08:12+00:00
97232c99ff8b34ff0a04951468e08a0bcfd69ffb
IlyaGusev/ru_sharegpt_cleaned
[ "task_categories:conversational", "task_categories:text-generation", "size_categories:n<1K", "language:ru", "region:us" ]
2023-04-10T17:40:47+00:00
{"language": ["ru"], "size_categories": ["n<1K"], "task_categories": ["conversational", "text-generation"], "dataset_info": {"features": [{"name": "messages", "sequence": [{"name": "role", "dtype": "string"}, {"name": "content", "dtype": "string"}]}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1993005, "num_examples": 273}], "download_size": 2054401, "dataset_size": 1993005}}
2023-09-16T13:34:28+00:00
62de7f0fbadb5b5349d1001e5b0ed919ec71ea65
# Dataset Card for "chunk_171" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_171
[ "region:us" ]
2023-04-10T17:55:57+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22432394592.75, "num_examples": 233554}], "download_size": 19567757592, "dataset_size": 22432394592.75}}
2023-04-10T18:25:55+00:00
da03689c4b1e5e48b0b9b5e6bd6ac15e4de68429
# Dataset Card for "speeches-separated-by-idx" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Sleoruiz/speeches-separated-by-idx
[ "region:us" ]
2023-04-10T17:56:25+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "gaceta_numero", "dtype": "string"}, {"name": "fecha_gaceta", "dtype": "string"}, {"name": "comision", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "idx", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 185409277, "num_examples": 149249}], "download_size": 93663216, "dataset_size": 185409277}}
2023-04-10T23:05:28+00:00
bcbba5340ef2e8358a1c876dacee0639189eefef
# Dataset Card for "poses-controlnet-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jeromeku/poses-controlnet-dataset
[ "region:us" ]
2023-04-10T18:12:06+00:00
{"dataset_info": {"features": [{"name": "original_image", "dtype": "image"}, {"name": "condtioning_image", "dtype": "image"}, {"name": "overlaid", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 123997204.0, "num_examples": 496}], "download_size": 124012880, "dataset_size": 123997204.0}}
2023-04-10T18:12:12+00:00
d51d395fc2bcb93988e765a01d25825444b613db
# Dataset Card for "chunk_173" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_173
[ "region:us" ]
2023-04-10T18:37:07+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 19216995696.375, "num_examples": 200077}], "download_size": 17278326460, "dataset_size": 19216995696.375}}
2023-04-10T18:54:09+00:00
82cbce189c20cf111daaa2e8e8f6de27563f9102
# Persian_ChatBot_dataset_Fine_Tuning_Alpaca_Model

Persian ChatBot dataset: fine-tune LLaMA on instructed data (a preprocessed Alpaca dataset). [GitHub](https://github.com/AliEdalat/ChatBot_for_persian_LLaMA_fine_tune.git)

- We use the [preprocessed alpaca dataset](https://github.com/thisserand/alpaca-lora-finetune-language.git) as our dataset. We translate the no_translate data to Persian with [mt5](https://huggingface.co/persiannlp/mt5-large-parsinlu-translation_en_fa); a translation sketch follows below. (The [train dataset](https://huggingface.co/datasets/AliEdalat/Persian_ChatBot_dataset_Fine_Tuning_Alpaca_Model/tree/main) and [test data](https://huggingface.co/datasets/AliEdalat/Persian_ChatBot_dataset_Fine_Tuning_Alpaca_Model/tree/main) with 2k examples are ready.)
- We use LLaMA as the generative model for the chatbot. We fine-tune the model on our Persian dataset and test it.
- To improve chatbot performance, replace "برای اینکه این کار را بکنم" (roughly "in order for me to do this") with the empty string "".
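The English-to-Persian translation step can be sketched with the standard 🤗 Transformers seq2seq interface. This is a minimal sketch assuming the usual mT5 API, not the exact script used to build this dataset:

```python
from transformers import MT5ForConditionalGeneration, MT5Tokenizer

# A minimal en->fa translation sketch with the model referenced above;
# the input sentence is an illustrative placeholder.
model_name = "persiannlp/mt5-large-parsinlu-translation_en_fa"
tokenizer = MT5Tokenizer.from_pretrained(model_name)
model = MT5ForConditionalGeneration.from_pretrained(model_name)

inputs = tokenizer("Give three tips for staying healthy.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```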
AliEdalat/Persian_ChatBot_dataset_Fine_Tuning_Alpaca_Model
[ "task_categories:text-generation", "task_categories:conversational", "size_categories:1K<n<10K", "language:fa", "license:apache-2.0", "region:us" ]
2023-04-10T18:39:35+00:00
{"language": ["fa"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation", "conversational"]}
2023-04-13T19:48:07+00:00
89cbf2f0181a73c53af6da3f6480fe0f7e734927
# Dataset Card for "lyrr-taylorswift" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
adabingw/lyrr-taylorswift
[ "region:us" ]
2023-04-10T18:45:39+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1899803, "num_examples": 991}], "download_size": 860281, "dataset_size": 1899803}}
2023-04-10T19:09:02+00:00
1a3d4d1024b524c52515186f2cf17b6826f76496
mazah/faqEmbeddings
[ "license:afl-3.0", "region:us" ]
2023-04-10T19:08:42+00:00
{"license": "afl-3.0"}
2023-04-10T19:10:17+00:00
61ac88cc557f00ba44aa78190116b185bb4b8ac7
# Dataset Card for Dataset Name

## Dataset Description

- **Homepage:** https://github.com/NASA-Tournament-Lab/CDC-NLP-Occ-Injury-Coding
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

Dataset used in the 2018 NASA Tournament Lab competition organized in collaboration with the National Institute for Occupational Safety and Health (NIOSH). The goal of the competition was to classify unstructured free-text "injury narratives" recorded in surveillance systems into injury codes from the Occupational Injuries and Illnesses Classification System (OIICS).

### Supported Tasks and Leaderboards

This is a multi-class text classification task. The training dataset includes 48 classifiable event codes distributed across 7 categories:

* Violence and other injuries by persons and animals
* Transportation incidents
* Fires and explosions
* Falls, slips, and trips
* Exposure to harmful substances or environments
* Contact with objects and equipment
* Overexertion and bodily reaction

For example, the text 'DOING UNSPECIFIED LIFTING AT WORK AND DEVELOPED PAIN ACROSS CHEST CHEST PAIN' is coded as 71, which means 'Overexertion involving outside sources'. More details on the categories and event codes can be found at: https://wwwn.cdc.gov/wisards/oiics/Trees/MultiTree.aspx?TreeType=Event

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

* text: injury narrative description
* sex: sex
* age: age
* event: OIICS code, which is the classification label

### Data Splits

| name | train | test | full_dataset |
|------------|--------:|-------:|-------------:|
| plain_text | 153,956 | 75,864 | 229,820 |

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
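To explore the data, it can be loaded with 🤗 Datasets (a minimal sketch; it assumes the splits and fields are exposed as described in this card):

```python
from datasets import load_dataset

# Assumes a "train" split with the fields listed above (text, sex, age, event).
ds = load_dataset("mayerantoine/injury-narrative-coding", split="train")
sample = ds[0]
print(sample["text"])   # free-text injury narrative
print(sample["event"])  # OIICS event code, e.g. 71
```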
mayerantoine/injury-narrative-coding
[ "task_categories:text-classification", "size_categories:100K<n<1M", "language:en", "license:apache-2.0", "medical", "region:us" ]
2023-04-10T19:19:41+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-classification"], "tags": ["medical"]}
2023-04-18T14:17:09+00:00
07129b3079b94f326f17c224daf0190fb5865ed1
cheider/gorilla_munch
[ "license:mit", "region:us" ]
2023-04-10T19:32:05+00:00
{"license": "mit"}
2023-04-10T19:32:05+00:00
9654f72ae6a035f2d6243798156fed4e2c15df22
# Dataset Card for "flirty_or_not" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ieuniversity/flirty_or_not
[ "region:us" ]
2023-04-10T19:42:32+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neutral", "1": "flirty"}}}}, {"name": "texts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 102704, "num_examples": 1584}, {"name": "test", "num_bytes": 20642, "num_examples": 318}, {"name": "validation", "num_bytes": 14111, "num_examples": 212}], "download_size": 95358, "dataset_size": 137457}}
2023-04-10T19:42:38+00:00
ccb970b124db6cde22cc256fda9a571b320e331c
# Dataset Card for "chunk_175" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_175
[ "region:us" ]
2023-04-10T19:56:39+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 18330856848.625, "num_examples": 190851}], "download_size": 16613815066, "dataset_size": 18330856848.625}}
2023-04-10T20:12:12+00:00
9d7b177bbf1409391fda2c69a0f3e661419560b3
# Dataset Card for "chunk_178" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_178
[ "region:us" ]
2023-04-10T20:00:53+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21354543936.5, "num_examples": 222332}], "download_size": 20129058055, "dataset_size": 21354543936.5}}
2023-04-10T20:19:34+00:00
78f91e0ded784253b11626d5303b85b47a51cb5d
# Dataset Card for "chunk_174" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_174
[ "region:us" ]
2023-04-10T20:02:50+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21068705088.5, "num_examples": 219356}], "download_size": 19201696100, "dataset_size": 21068705088.5}}
2023-04-10T20:23:08+00:00
ec687c3a67653405f42d18c0be6f13f66bd4a813
# Dataset Card for GPT4All-J Prompt Generations

## Dataset Description

Dataset used to train [GPT4All-J](https://huggingface.co/nomic-ai/gpt4all-j) and [GPT4All-J-LoRA](https://huggingface.co/nomic-ai/gpt4all-j-lora).

We release several versions of datasets:

- **v1.0:** The original dataset we used to finetune GPT-J on
- **v1.1-breezy**: A filtered dataset where we removed all instances of `AI language model`
- **v1.2-jazzy**: A filtered dataset where we also removed instances like `I'm sorry, I can't answer...` and `AI language model`
- **v1.3-groovy**: The v1.2 dataset with ShareGPT and Dolly added, with ~8% of semantic duplicates removed from the dataset using [Atlas](https://atlas.nomic.ai/)

The dataset defaults to `main` which is `v1.0`. To download a specific version, you can pass an argument to the keyword `revision` in `load_dataset`:

```python
from datasets import load_dataset

jazzy = load_dataset("nomic-ai/gpt4all-j-prompt-generations", revision='v1.2-jazzy')
```

- **Homepage:** [gpt4all.io](https://gpt4all.io/)
- **Repository:** [gpt4all](https://github.com/nomic-ai/gpt4all)
- **Paper:** [Technical Report](https://static.nomic.ai/gpt4all/2023_GPT4All-J_Technical_Report_2.pdf)
- **Atlas Map:** [Map of Prompts](https://atlas.nomic.ai/map/gpt4all-j-prompts-curated) and [Responses](https://atlas.nomic.ai/map/gpt4all-j-response-curated)
nomic-ai/gpt4all-j-prompt-generations
[ "size_categories:100K<n<1M", "language:en", "license:apache-2.0", "region:us" ]
2023-04-10T20:59:10+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1774285641, "num_examples": 808812}], "download_size": 990673616, "dataset_size": 1774285641}}
2023-04-24T14:20:43+00:00
c1b9a23cfbb77b34668df5705b6e8832a06046c1
# **CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society**

- **Github:** https://github.com/lightaime/camel
- **Website:** https://www.camel-ai.org/
- **Arxiv Paper:** https://arxiv.org/abs/2303.17760

## Dataset Summary

The Math dataset is composed of 50K problem-solution pairs obtained using GPT-4. The problem-solution pairs are generated from 25 math topics, with 25 subtopics for each topic and 80 problems for each (topic, subtopic) pair. We provide the data in `math50k.zip`.

## Data Fields

**The data fields for files in `math50k.zip` are as follows:**

* `role_1`: assistant role
* `topic`: math topic
* `sub_topic`: math subtopic belonging to topic
* `message_1`: refers to the problem the assistant is asked to solve.
* `message_2`: refers to the solution provided by the assistant.

Note: File naming refers to {`topic_index`}\_{`subtopic_index`}\_{`problem_number`}.

**Download in python**

```
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="camel-ai/math", repo_type="dataset", filename="math50k.zip",
                local_dir="datasets/", local_dir_use_symlinks=False)
```

### Citation

```
@misc{li2023camel,
      title={CAMEL: Communicative Agents for "Mind" Exploration of Large Scale Language Model Society},
      author={Guohao Li and Hasan Abed Al Kader Hammoud and Hani Itani and Dmitrii Khizbullin and Bernard Ghanem},
      year={2023},
      eprint={2303.17760},
      archivePrefix={arXiv},
      primaryClass={cs.AI}
}
```

## Disclaimer

This data was synthetically generated by GPT-4 and might contain incorrect information. The dataset is intended for research purposes only.
camel-ai/math
[ "task_categories:text-generation", "language:en", "license:cc-by-nc-4.0", "instruction-finetuning", "arxiv:2303.17760", "region:us" ]
2023-04-10T21:00:46+00:00
{"language": ["en"], "license": "cc-by-nc-4.0", "task_categories": ["text-generation"], "pretty_name": "CAMEL Math", "tags": ["instruction-finetuning"], "arxiv": 2303.1776, "extra_gated_prompt": "By using this data, you acknowledge and agree to utilize it solely for research purposes, recognizing that the dataset may contain inaccuracies due to its artificial generation through ChatGPT.", "extra_gated_fields": {"Name": "text", "Email": "text"}, "I will adhere to the terms and conditions of this dataset": "checkbox"}
2023-06-22T20:59:52+00:00
ef029077982390caa1d9c234f325b417010d7c75
# Dataset Card for "chunk_179" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_179
[ "region:us" ]
2023-04-10T21:40:59+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 25908371712.0, "num_examples": 269744}], "download_size": 23376626378, "dataset_size": 25908371712.0}}
2023-04-10T22:03:47+00:00
e6d4367461770e9b74fc95a9b860527f24dc1f73
# Dataset Card for "CV_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
NathanRoll/CV_train
[ "region:us" ]
2023-04-10T22:12:35+00:00
{"dataset_info": {"features": [{"name": "client_id", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 48000}}}, {"name": "sentence", "dtype": "string"}, {"name": "up_votes", "dtype": "int64"}, {"name": "down_votes", "dtype": "int64"}, {"name": "age", "dtype": "string"}, {"name": "gender", "dtype": "string"}, {"name": "accent", "dtype": "string"}, {"name": "locale", "dtype": "string"}, {"name": "segment", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 20472485884.512, "num_examples": 564337}], "download_size": 25625771028, "dataset_size": 20472485884.512}}
2023-04-11T00:01:36+00:00
505731738f3fc24cf0e7d55dc9ff40acc8e9e32e
# Electricity The [Electricity dataset](https://www.openml.org/search?type=data&sort=runs&id=151&status=active) from the [OpenML repository](https://www.openml.org/). # Configurations and tasks | **Configuration** | **Task** | **Description** | |-------------------|---------------------------|-------------------------| | electricity | Binary classification | Has the electricity cost gone up?| # Usage ```python from datasets import load_dataset dataset = load_dataset("mstz/electricity", "electricity")["train"] ```
mstz/electricity
[ "task_categories:tabular-classification", "size_categories:10k<n<100K", "language:en", "license:cc", "electricity", "tabular_classification", "binary_classification", "UCI", "region:us" ]
2023-04-10T22:24:07+00:00
{"language": ["en"], "license": "cc", "size_categories": ["10k<n<100K"], "task_categories": ["tabular-classification"], "pretty_name": "Electricity", "tags": ["electricity", "tabular_classification", "binary_classification", "UCI"], "configs": ["electricity"]}
2023-04-16T16:30:58+00:00
a56670d13cb288c3de159763568ba16511614efe
# Pol The [Pol dataset](https://www.openml.org/search?type=data&status=active&id=722) from the [OpenML repository](https://www.openml.org/). # Configurations and tasks | **Configuration** | **Task** | **Description** | |-------------------|---------------------------|-------------------------| | pol | Binary classification | Binarized classification of the pol target variable.| # Usage ```python from datasets import load_dataset dataset = load_dataset("mstz/pol", "pol")["train"] ```
mstz/pol
[ "task_categories:tabular-classification", "size_categories:10K<n<100K", "language:en", "license:cc", "openml", "binary_classification", "region:us" ]
2023-04-10T22:31:17+00:00
{"language": ["en"], "license": "cc", "size_categories": ["10K<n<100K"], "task_categories": ["tabular-classification"], "pretty_name": "Pol binary", "tags": ["openml", "binary_classification"]}
2023-12-22T18:56:27+00:00
599dc68d3665303dfacf00b55192329cf2b2c8c3
# Dataset Card for "chunk_180" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_180
[ "region:us" ]
2023-04-10T22:55:54+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 18216943920.875, "num_examples": 189665}], "download_size": 16530141980, "dataset_size": 18216943920.875}}
2023-04-10T23:11:05+00:00
446b55c33c67862760ebc104455424b17dd30834
# House16 The [House16 dataset](https://www.openml.org/search?type=data&sort=runs&id=821&status=active) from the [OpenML repository](https://www.openml.org/). # Configurations and tasks | **Configuration** | **Task** | |-------------------|---------------------------| | house16 | Binary classification | # Usage ```python from datasets import load_dataset dataset = load_dataset("mstz/house16", "house16")["train"] ```
mstz/house16
[ "task_categories:tabular-classification", "size_categories:10k<n<100K", "language:en", "license:cc", "house16", "tabular_classification", "binary_classification", "region:us" ]
2023-04-10T22:58:16+00:00
{"language": ["en"], "license": "cc", "size_categories": ["10k<n<100K"], "task_categories": ["tabular-classification"], "pretty_name": "House16", "tags": ["house16", "tabular_classification", "binary_classification"], "configs": ["house16"]}
2023-04-16T16:31:52+00:00
acd7bd0ad4b3f56bb139a9e6b1bc7fbae275d659
# Phoneme The [Phoneme dataset](https://www.openml.org/search?type=data&sort=runs&id=1489&status=active) from the [OpenML repository](https://www.openml.org/). # Configurations and tasks | **Configuration** | **Task** | |-------------------|---------------------------| | phoneme | Binary classification | # Usage ```python from datasets import load_dataset dataset = load_dataset("mstz/phoneme")["train"] ```
mstz/phoneme
[ "task_categories:tabular-classification", "size_categories:10k<n<100K", "language:en", "phoneme", "tabular_classification", "binary_classification", "region:us" ]
2023-04-10T23:14:16+00:00
{"language": ["en"], "size_categories": ["10k<n<100K"], "task_categories": ["tabular-classification"], "pretty_name": "Phoneme", "tags": ["phoneme", "tabular_classification", "binary_classification"], "configs": ["phoneme"]}
2023-04-10T23:14:47+00:00
cd76be5831cf2b3a23c2ce189b6426df20ed3280
zingzeu/test_dataset
[ "license:unknown", "region:us" ]
2023-04-11T00:29:18+00:00
{"license": "unknown"}
2023-04-11T00:29:18+00:00
9d9bc478730e70570de6b93805cc9a0b8e8cd314
# Dataset Card for "chunk_181" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
one-sec-cv12/chunk_181
[ "region:us" ]
2023-04-11T00:34:31+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 19793571840.0, "num_examples": 206080}], "download_size": 17855216853, "dataset_size": 19793571840.0}}
2023-04-11T00:51:39+00:00
31445959d4f4e1b6fafaed6012cf761331f427ed
# Dataset Card for "NTU-Stem" ![sample](sample.png) The NTU Tree Dataset is a high-resolution few-shot learning dataset of the stem images of 15 different tree species found in the National Taiwan University (NTU) campus. The dataset was collected using personal cellphones in an effort to increase familiarity with the campus’s natural beauty. The dataset includes images of the stem of the following 15 tree species, along with their Chinese and English names: | 中文名稱 | English Name | |--------------|--------------------------| | 大王椰子 | Royal Palm | | 土肉桂 | Indigenous Cinnamon Tree | | 大葉桃花心木 | Honduras Mahogany | | 小葉南洋杉 | Araucaria Excelsa | | 石栗 | Indian Walnut | | 朴樹 | Chinese Hackberry | | 血桐 | Macaranga | | 垂榕 | White Barkfig | | 肯氏南洋杉 | Hoop Pine | | 美人樹 | Floss-silk Tree | | 烏桕 | Chinese Tallow Tree | | 楓香 | Formosan Sweet Gum | | 榕樹 | Marabutan | | 蒲葵 | Chinese Fan Palm | | 樟樹 | Comphor Tree | The dataset contains a total of 240 images, with each species class containing 8 to 16 training images and the remaining 8 to 10 images serving as test data. The images were captured at a resolution of approximately 3k x 3k pixels, providing high detail for the purpose of few-shot learning. ## Acknowledgements This dataset was collected by the following students of National Taiwan University in Graduate Institute of Networking and Multimedia (GINM) and the Department of Computer Science and Information Engineering (CSIE). Thanks to [@liswei](https://huggingface.co/liswei), [@roger0426](https://huggingface.co/roger0426), [@CYLiao1127](https://github.com/CYLiao1127), and [@j1u2l3i4a5n](https://github.com/j1u2l3i4a5n) for collecting the dataset.
liswei/NTU-Tree
[ "task_categories:image-classification", "size_categories:n<1K", "license:cc-by-nc-4.0", "biology", "region:us" ]
2023-04-11T01:28:14+00:00
{"license": "cc-by-nc-4.0", "size_categories": ["n<1K"], "task_categories": ["image-classification"], "pretty_name": "NTU-Stem", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "araucaria_excelsa", "1": "chinese_fan_palm", "2": "chinese_hackberry", "3": "chinese_tallow_tree", "4": "comphor_tree", "5": "formosan_sweet_gum", "6": "honduras_mahogany", "7": "hoop_pine", "8": "indian_walnut", "9": "indigenous_cinnamon_tree", "10": "macaranga", "11": "marabutan", "12": "royal_palm", "13": "silk_floss_tree", "14": "white_barkfig"}}}}], "splits": [{"name": "train", "num_bytes": 720396640, "num_examples": 224}, {"name": "test", "num_bytes": 429531935, "num_examples": 132}, {"name": "validation", "num_bytes": 406231409, "num_examples": 122}], "download_size": 1554491709, "dataset_size": 1556159984}, "tags": ["biology"]}
2023-04-15T16:07:22+00:00
3dd21720e1b7a1f480d14d333f5e06e7cfad8ba2
# shareGPT Korean translation dataset This project translates the 620K conversations of the shareGPT [dataset](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/tree/main) into Korean using DeepL. Translation progress is tracked below. ## Progress 620K of 620K conversations translated (complete). ## File structure - original_dataset.json: the original shareGPT file (620K English conversations) - ko_dataset.json: the translated shareGPT file, with the same structure as the original - ko_dataset_2.json: a version of ko_dataset.json with malformed conversations removed (conversations that are empty, or that start with gpt and have no further turns) - ko_alpaca_style_dataset.json: restructured for Alpaca-style fine-tuning ## License Because the source data comes from OpenAI, the OpenAI [Terms of Use](https://openai.com/policies/terms-of-use) apply. Everything else is licensed under Creative Commons Attribution 2.0 Korea (CC BY 2.0 KR). ## Author https://github.com/melodysdreamj
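A minimal download sketch, assuming the JSON files listed above sit at the root of the `junelee/sharegpt_deepl_ko` repository:

```python
import json
from huggingface_hub import hf_hub_download

# Fetch the Alpaca-style file; the exact path within the repo is assumed here.
path = hf_hub_download(
    repo_id="junelee/sharegpt_deepl_ko",
    repo_type="dataset",
    filename="ko_alpaca_style_dataset.json",
)
with open(path, encoding="utf-8") as f:
    data = json.load(f)
print(len(data))
```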
junelee/sharegpt_deepl_ko
[ "region:us" ]
2023-04-11T01:30:26+00:00
{}
2023-04-27T00:43:36+00:00
94f06efed79162775c8d23a09e3cd606c0b2b9c0
GmoData/ui5_db
[ "license:cc-by-4.0", "region:us" ]
2023-04-11T01:53:11+00:00
{"license": "cc-by-4.0"}
2023-04-11T01:58:26+00:00
b8ded0ce0aeb721e45678f244a75d541fcb489da
mouadse/medicare
[ "license:mit", "region:us" ]
2023-04-11T02:01:55+00:00
{"license": "mit"}
2023-04-11T02:01:55+00:00
1e19d14b1a7c3bc3e70eb05c112e029eadbae586
# Dataset Card for "imdb_misspelled_0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_0
[ "region:us" ]
2023-04-11T02:24:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33632823, "num_examples": 25000}, {"name": "validation", "num_bytes": 32850685, "num_examples": 25000}], "download_size": 43184320, "dataset_size": 66483508}}
2023-04-11T02:24:22+00:00
35a8f4726b65783700509bfb14bbc1dcafd25c5f
# Dataset Card for "imdb_misspelled_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_10
[ "region:us" ]
2023-04-11T02:24:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33634327, "num_examples": 25000}, {"name": "validation", "num_bytes": 32850518, "num_examples": 25000}], "download_size": 52525337, "dataset_size": 66484845}}
2023-04-11T02:24:30+00:00
ba292ff88d60523bd47fde558b36d90865770a4c
# Dataset Card for "imdb_misspelled_20" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_20
[ "region:us" ]
2023-04-11T02:24:31+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33633433, "num_examples": 25000}, {"name": "validation", "num_bytes": 32850078, "num_examples": 25000}], "download_size": 49040121, "dataset_size": 66483511}}
2023-04-11T02:24:37+00:00
484302054929b3e9049b17ccdba458ef8ef0f277
# Dataset Card for "imdb_misspelled_30" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_30
[ "region:us" ]
2023-04-11T02:24:38+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33632801, "num_examples": 25000}, {"name": "validation", "num_bytes": 32851081, "num_examples": 25000}], "download_size": 47443707, "dataset_size": 66483882}}
2023-04-11T02:24:45+00:00
6456642776010d69c4572ecfe133797b139eee8c
# Dataset Card for "imdb_misspelled_40" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_40
[ "region:us" ]
2023-04-11T02:24:46+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33632330, "num_examples": 25000}, {"name": "validation", "num_bytes": 32850630, "num_examples": 25000}], "download_size": 46551199, "dataset_size": 66482960}}
2023-04-11T02:24:53+00:00
f70fe763e87ec74844d83c29e036cc75bad2943e
# Dataset Card for "imdb_misspelled_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sethapun/imdb_misspelled_50
[ "region:us" ]
2023-04-11T02:24:53+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33633024, "num_examples": 25000}, {"name": "validation", "num_bytes": 32850470, "num_examples": 25000}], "download_size": 45974325, "dataset_size": 66483494}}
2023-04-11T02:25:01+00:00
ed8c6ed2191c3b50073a0ee187ea0b60841f1265
toninsaf/asdasd
[ "license:bigscience-bloom-rail-1.0", "region:us" ]
2023-04-11T03:20:28+00:00
{"license": "bigscience-bloom-rail-1.0"}
2023-04-11T03:20:28+00:00
de47eb628ee6c84de8c1eda8ed7940430fa5a24c
Sarjono/Tester
[ "license:cc", "region:us" ]
2023-04-11T03:30:48+00:00
{"license": "cc"}
2023-04-11T03:30:48+00:00
7f0520e168deced1d2f2b8c5ac27f6a7835c7ef8
basant20/nutfact
[ "license:mit", "region:us" ]
2023-04-11T04:26:40+00:00
{"license": "mit"}
2023-04-12T05:20:36+00:00
c68e61ac2edd5b0dc1f6629c836929e9eb2800a2
# Dataset Card for "fill100k" Kinda similar to https://huggingface.co/datasets/offchan/fill50k but with more images, lower resolution, uniform circle radius, and uniform color randomization covering the full spectrum of all possible colors. Colors are 2x3 array containing circle color followed by background color in RGB format. Color names are derived from the closest color in a list of predefined common colors. Distance between 2 colors is calculated using the sum of weighted squared differences. The weights are 0.299, 0.587, and 0.114.
offchan/fill100k
[ "region:us" ]
2023-04-11T04:54:05+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "colors", "dtype": {"array2_d": {"shape": [2, 3], "dtype": "float32"}}}, {"name": "conditioning_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 66204989.0, "num_examples": 100000}], "download_size": 46802201, "dataset_size": 66204989.0}}
2023-04-17T15:11:53+00:00
abe08595fffe2f3e59ba8ffe9bb8e2490b0cecf1
Amit19july/simpleODdataset
[ "license:other", "region:us" ]
2023-04-11T05:06:19+00:00
{"license": "other"}
2023-04-11T05:06:19+00:00
eaa4001b17d676090156ea8bb8beb3ace74deb6e
dog/unav-100
[ "license:cc-by-4.0", "region:us" ]
2023-04-11T05:09:05+00:00
{"license": "cc-by-4.0", "dataset_info": {"features": [{"name": "ytid", "dtype": "string"}, {"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "duration", "dtype": "float64"}, {"name": "annotations", "list": [{"name": "label", "dtype": "string"}, {"name": "label_id", "dtype": "int64"}, {"name": "segment_end", "dtype": "float64"}, {"name": "segment_start", "dtype": "float64"}]}], "splits": [{"name": "train", "num_bytes": 1044336, "num_examples": 6489}, {"name": "validation", "num_bytes": 346495, "num_examples": 2134}, {"name": "test", "num_bytes": 342199, "num_examples": 2167}], "download_size": 709359, "dataset_size": 1733030}}
2023-04-11T06:12:50+00:00
a839c25e203a79377e7d00e5d8c94a03319c10c2
# Deduplicated version of Isotonic/human_assistant_conversation - Deduplicated with a maximum Jaccard similarity of 0.75
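A rough sketch of what deduplication at this threshold could look like (the exact procedure used for this dataset is not documented; the quadratic scan below is only illustrative — large corpora typically use MinHash/LSH instead):

```python
def jaccard(a: str, b: str) -> float:
    """Jaccard similarity between the whitespace-token sets of two strings."""
    sa, sb = set(a.split()), set(b.split())
    if not sa and not sb:
        return 1.0
    return len(sa & sb) / len(sa | sb)

def dedupe(texts: list[str], threshold: float = 0.75) -> list[str]:
    """Keep a text only if its similarity to every kept text stays at or below the threshold."""
    kept: list[str] = []
    for t in texts:
        if all(jaccard(t, k) <= threshold for k in kept):
            kept.append(t)
    return kept

# The second text overlaps the first with similarity 0.8 > 0.75, so it is dropped.
print(dedupe(["hello world foo bar", "hello world foo bar baz", "something else entirely"]))
```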
Isotonic/human_assistant_conversation_deduped
[ "task_categories:text-generation", "task_categories:conversational", "size_categories:100K<n<1M", "language:en", "language:es", "language:zh", "license:afl-3.0", "region:us" ]
2023-04-11T05:16:00+00:00
{"language": ["en", "es", "zh"], "license": "afl-3.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "conversational"], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1069951715.5157907, "num_examples": 586784}, {"name": "test", "num_bytes": 133745787.85612378, "num_examples": 73349}, {"name": "validation", "num_bytes": 133743964.43947384, "num_examples": 73348}], "download_size": 701202899, "dataset_size": 1337441467.8113883}}
2023-07-05T11:35:56+00:00
3df0666efa1e25687b2d6a342ec7f59dce8a78ac
jcal/lab5DataSet
[ "license:mit", "region:us" ]
2023-04-11T05:43:26+00:00
{"license": "mit"}
2023-04-11T22:30:00+00:00
028101f229a069137e54cb2a003e46ba855dd2ba
onepiece808/txwdataset
[ "license:openrail", "region:us" ]
2023-04-11T06:03:23+00:00
{"license": "openrail"}
2023-04-11T06:03:23+00:00
b070ba53aa8403ccc2b1f68006d2e14c3cd157bc
# Dataset Card for ReRelEM ## Dataset Description - **Paper:** [Relation detection between named entities: report of a shared task](https://aclanthology.org/W09-2421.pdf) - **Point of Contact:** [Hugo Gonçalo Oliveira]([email protected]) ### Dataset Summary The ReRelEM dataset is designed for the detection and classification of relations between named entities in Portuguese text. It contains 2226 training, 701 validation, and 805 test instances. Each instance contains two sentences with two entities enclosed by the tags [E1] and [E2]. The dataset provides a fourfold relationship classification: identity, included-in, located-in, and other (which is further broken down into twenty different relations). It's important to note that, although we maintained more than 99% of the original instances, this is not a full representation of the original ReRelEM dataset. The dataset was split into train, validation, and test sets, after which 21 instances with relation types not included in the training set were dropped from the test set. Furthermore, 7 instances from the original dataset that had formatting errors and could not be resolved into post-processed records were also dropped. ### Supported Tasks and Leaderboards - Relation extraction: The primary task of this dataset is to classify relations between named entities. ### Languages - Portuguese ## Dataset Structure ### Data Instances An example data instance from the dataset: ```json { "docid": "cver", "sentence1": "O PRESIDENTE Sarkozy abriu a Conferência de Dadores realizada em Paris com uma frase grandiloquente sobre a necessidade urgente de criar um Estado palestiniano no fim de 2008 . O Presidente ou é mentiroso ou finge-se ignorante, ou as duas coisas. Depois do falhanço esperado da cimeira de Annapolis , um modo de [E2]Condoleezza Rice[/E2] salvar a face e de a Administração | Administração americana e a Europa continuarem a fingir que estão interessadas em resolver o conflito israelo-palestiniano e de lavarem as mãos de tudo o resto, Sarkozy não pode ignorar que o momento para pronunciamentos débeis é o menos adequado. Tony Blair , depois de ter minado todo o processo de paz do Médio Oriente ao ordenar a invasão do Iraque de braço dado com [E1]Bush[/E1] , continua a emitir piedades deste género, e diz que está na altura de resolver o problema e que ele pode ser resolvido. Blair não sabe o que diz.", "sentence2": "nan", "label": "relacao_profissional", "same_text": true } ``` ### Data Fields - `docid`: Document ID of both sentences (sentence1 and sentence2) - `sentence1`: The first sentence, with an entity span enclosed by the tags [E1] and [/E1] - `sentence2`: The second sentence, with an entity span enclosed by the tags [E2] and [/E2] - `label`: The type of relation between the entities - `same_text`: True if both entity spans appear in the same sentence. If True, `sentence2` will be empty. ### Data Splits | | train | validation | test | |--------|-------|------------|------| | Instances | 2226 | 701 | 805 | The dataset was divided in a manner that ensured sentences from the same document did not appear in more than one split.
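A minimal usage sketch (assuming the standard `datasets` loading path for this repository):

```python
from datasets import load_dataset

dataset = load_dataset("ruanchaves/rerelem")
example = dataset["train"][0]
print(example["label"], example["sentence1"][:80])
```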
### Citation Information ```bibtex @inproceedings{freitas2009relation, title={Relation detection between named entities: report of a shared task}, author={Freitas, Cl{\'a}udia and Santos, Diana and Mota, Cristina and Oliveira, Hugo Gon{\c{c}}alo and Carvalho, Paula}, booktitle={Proceedings of the Workshop on Semantic Evaluations: Recent Achievements and Future Directions (SEW-2009)}, pages={129--137}, year={2009} } ``` ### Contributions Thanks to [@ruanchaves](https://github.com/ruanchaves) for adding this dataset.
ruanchaves/rerelem
[ "task_categories:text-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:extended|harem", "language:pt", "relation extraction,", "region:us" ]
2023-04-11T06:18:00+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["pt"], "license": [], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["extended|harem"], "task_categories": ["text-classification"], "task_ids": [], "pretty_name": "ReRelEM", "tags": ["relation extraction,"]}
2023-04-14T10:01:24+00:00
efac61a4f6d68c9c6f4f6e6c15bd70c5d5db0c21
milly233/govdatata_embedding
[ "license:afl-3.0", "region:us" ]
2023-04-11T06:19:32+00:00
{"license": "afl-3.0"}
2023-04-11T06:20:26+00:00
0ece3c64ef0ba2c1b9fc138c755a565327ed36ef
MahouLarge/gutenberg_spacy_ner
[ "license:apache-2.0", "region:us" ]
2023-04-11T06:42:34+00:00
{"license": "apache-2.0"}
2023-04-11T06:42:34+00:00
7e51772072e0e601b90e4f70ec5b593b45d493aa
diyi/ziliao
[ "license:openrail", "region:us" ]
2023-04-11T06:59:40+00:00
{"license": "openrail"}
2023-04-11T06:59:40+00:00
1d369883cf740baebbcf67dc60c620ed398fe289
# Dataset Card for "tawikidump-20230320-tokenizer-clean" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
livinNector/tawikidump-20230320-tokenizer-clean
[ "region:us" ]
2023-04-11T06:59:55+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "tawikiquote", "num_bytes": 6354086, "num_examples": 1211}, {"name": "tawikisource", "num_bytes": 112492580, "num_examples": 5031}, {"name": "tawiki", "num_bytes": 725939876, "num_examples": 155212}, {"name": "tawikinews", "num_bytes": 13922393, "num_examples": 3372}, {"name": "tawiktionary", "num_bytes": 152591462, "num_examples": 406557}, {"name": "tawikibooks", "num_bytes": 4553789, "num_examples": 1155}], "download_size": 0, "dataset_size": 1015854186}}
2023-04-11T12:38:33+00:00
f855b1017490f1552aa873db1ca65f0e6d71de93
# Dataset Card for "alpaca_bangla" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
argilla/alpaca_bangla
[ "region:us" ]
2023-04-11T07:07:33+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "_instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "vectors", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "dtype": "null"}], "splits": [{"name": "train", "num_bytes": 1919536, "num_examples": 1000}], "download_size": 717463, "dataset_size": 1919536}}
2023-04-11T07:07:37+00:00
477eb44cb2db80eb500ba3476aa3293639da2904
# Dataset Card for "ta-oscar-tokenizer-clean" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
livinNector/ta-oscar-tokenizer-clean
[ "region:us" ]
2023-04-11T07:19:34+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9037331867, "num_examples": 556772}], "download_size": 2891190241, "dataset_size": 9037331867}}
2023-04-12T13:51:03+00:00
faab3cde6c313abdf8acacbd1d87d052219bff6d
# Dataset Card for "tawikidump-20230320-clean" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
livinNector/tawikidump-20230320-clean
[ "region:us" ]
2023-04-11T07:33:43+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "tawikiquote", "num_bytes": 6415052, "num_examples": 1211}, {"name": "tawikisource", "num_bytes": 114028540, "num_examples": 5031}, {"name": "tawiki", "num_bytes": 736907252, "num_examples": 155212}, {"name": "tawikinews", "num_bytes": 14149677, "num_examples": 3372}, {"name": "tawiktionary", "num_bytes": 154806778, "num_examples": 406557}, {"name": "tawikibooks", "num_bytes": 4631755, "num_examples": 1155}], "download_size": 310101942, "dataset_size": 1030939054}}
2023-04-11T12:56:30+00:00
e75d543dff4159197117ca4135d1f70841c78ae1
# Dataset Card for "reklamation24_supermaerkte-drogerien" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fathyshalab/reklamation24_supermaerkte-drogerien
[ "region:us" ]
2023-04-11T07:43:11+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "label_name", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 234328, "num_examples": 410}, {"name": "test", "num_bytes": 58653, "num_examples": 103}], "download_size": 0, "dataset_size": 292981}}
2023-04-19T07:17:08+00:00
a7d034bb7163c5a9f22140467b1a8e2bc5ce32c2
SeifXT/Ayesha_Erotica
[ "region:us" ]
2023-04-11T07:43:20+00:00
{}
2023-04-11T07:47:11+00:00
46e0c4baed5c4725636b6c278bbdb10ab0f4886f
# Dataset Card for "ta-oscar-clean" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
livinNector/ta-oscar-clean
[ "region:us" ]
2023-04-11T07:45:52+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9286292818, "num_examples": 556772}], "download_size": 3111108905, "dataset_size": 9286292818}}
2023-04-13T04:22:17+00:00
a4e76248a96730907f0670110ac09df6848d0f41
# Dataset Card for "neutral_to_flirty" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ieuniversity/neutral_to_flirty
[ "region:us" ]
2023-04-11T07:51:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "non-flirty", "dtype": "string"}, {"name": "flirty", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12300, "num_examples": 116}, {"name": "test", "num_bytes": 3152, "num_examples": 30}], "download_size": 14651, "dataset_size": 15452}}
2023-04-11T07:51:27+00:00