sha (stringlengths 40-40) | text (stringlengths 0-13.4M) | id (stringlengths 2-117) | tags (list) | created_at (stringlengths 25-25) | metadata (stringlengths 2-31.7M) | last_modified (stringlengths 25-25)
---|---|---|---|---|---|---
b4e7d42750cbc1d81f9b85b98b13b48c88092adb
|
This is the GPT4-LLM dataset from: https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM
It has been filtered of all OpenAI disclaimers and refusals. (Disclaimer: it may have removed some additional things besides just OAI disclaimers, as I used the following script, which is a bit broader: https://huggingface.co/datasets/ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered/blob/main/wizardlm_clean.py)
There is a modified script of that in the repo that was used specifically for this.
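For reference, a minimal loading sketch with the 🤗 `datasets` library (assuming the usual single `train` split, which the card does not state explicitly):
```python
from datasets import load_dataset

# Load the cleaned GPT4-LLM instruction data from the Hub
ds = load_dataset("teknium/GPT4-LLM-Cleaned", split="train")
print(ds[0])
```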
|
teknium/GPT4-LLM-Cleaned
|
[
"region:us"
] |
2023-05-02T19:11:04+00:00
|
{}
|
2023-05-04T00:48:35+00:00
|
a10c917c47d9283eb7aad55e330b8146f1b60a0e
|
# Dataset Card for "dvoice3"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
bouim/dvoice3
|
[
"region:us"
] |
2023-05-02T19:21:52+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}, {"name": "duration", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1459262910.208, "num_examples": 2117}, {"name": "test", "num_bytes": 75535309.0, "num_examples": 114}], "download_size": 1032875305, "dataset_size": 1534798219.208}}
|
2023-05-02T19:22:37+00:00
|
55c5300d8809f129f65c2c378671d2d8c8b41f3e
|
# Dataset Card for "dvoice3_alltrain"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
bouim/dvoice3_alltrain
|
[
"region:us"
] |
2023-05-02T19:30:21+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}, {"name": "duration", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1459262910.208, "num_examples": 2117}, {"name": "test", "num_bytes": 75535309.0, "num_examples": 114}], "download_size": 1032875305, "dataset_size": 1534798219.208}}
|
2023-05-02T19:31:07+00:00
|
d12a943d9d3ff2fb10395c749b121ac862c5747a
|
# Dataset Card for "ak_edit_issue_analysis_128_v2-reward"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/ak_edit_issue_analysis_128_v2-reward
|
[
"region:us"
] |
2023-05-02T19:46:56+00:00
|
{"dataset_info": {"features": [{"name": "input_text", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "output_text", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "ak-edit-finetuned-triton-v0", "dtype": "string"}, {"name": "ak-edit-finetuned-triton-v1", "dtype": "string"}, {"name": "ak-edit-finetuned-triton-v2", "dtype": "string"}, {"name": "ak-0324-sft-no-reward", "dtype": "string"}, {"name": "continue_50m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "continue_50m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "continue_50m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "continue_50m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "retry_12m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "stars_2m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-0324-sft-no-reward", "dtype": "float64"}], "splits": [{"name": "completion_issue", "num_bytes": 26644178, "num_examples": 9647}, {"name": "garbage_issue", "num_bytes": 9266994, "num_examples": 3565}, {"name": "gender_issue", "num_bytes": 13825810, "num_examples": 4405}], "download_size": 24836175, "dataset_size": 49736982}}
|
2023-05-02T19:54:51+00:00
|
842ec68d30e91de5199e255e131a2ac8db0cc7a8
|
Sharppen10/Dataset1
|
[
"license:other",
"region:us"
] |
2023-05-02T19:50:53+00:00
|
{"license": "other"}
|
2023-05-02T19:50:53+00:00
|
|
8fa5917661c1be3dd506595a9bae448b12aae68b
|
1,949,556 examples (originally 2.4 million).
|
Dampish/ORION-L-DEDUPE
|
[
"region:us"
] |
2023-05-02T20:06:31+00:00
|
{}
|
2023-05-02T20:10:53+00:00
|
4ef94d2c3c43918f949eaf453e00d2e220dfb83a
|
# Dataset Card for "geotagged-streetview-images"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
rohanmyer/geotagged-streetview-images
|
[
"region:us"
] |
2023-05-02T20:09:48+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "AD", "1": "AM", "2": "AR", "3": "AT", "4": "AU", "5": "BA", "6": "BD", "7": "BE", "8": "BG", "9": "BR", "10": "BW", "11": "BY", "12": "CA", "13": "CG", "14": "CH", "15": "CI", "16": "CL", "17": "CN", "18": "CO", "19": "CR", "20": "CU", "21": "CZ", "22": "DE", "23": "DK", "24": "DO", "25": "DZ", "26": "EC", "27": "EE", "28": "EG", "29": "ES", "30": "FI", "31": "FR", "32": "GB", "33": "GE", "34": "GH", "35": "GM", "36": "GN", "37": "GT", "38": "HK", "39": "HN", "40": "HR", "41": "HT", "42": "ID", "43": "IE", "44": "IL", "45": "IN", "46": "IQ", "47": "IR", "48": "IT", "49": "JP", "50": "KE", "51": "KG", "52": "KH", "53": "KR", "54": "KZ", "55": "LK", "56": "LT", "57": "MA", "58": "MD", "59": "ME", "60": "MK", "61": "MM", "62": "MN", "63": "MQ", "64": "MT", "65": "MX", "66": "MY", "67": "NA", "68": "NG", "69": "NL", "70": "NO", "71": "NZ", "72": "PA", "73": "PE", "74": "PG", "75": "PH", "76": "PK", "77": "PL", "78": "PR", "79": "PS", "80": "PT", "81": "RE", "82": "RO", "83": "RS", "84": "RU", "85": "RW", "86": "SA", "87": "SE", "88": "SK", "89": "SL", "90": "SV", "91": "SY", "92": "TH", "93": "TL", "94": "TN", "95": "TR", "96": "TT", "97": "TW", "98": "TZ", "99": "UA", "100": "US", "101": "UY", "102": "UZ", "103": "VE", "104": "VN", "105": "VU", "106": "XK", "107": "ZA"}}}}], "splits": [{"name": "train", "num_bytes": 1403099432.281, "num_examples": 15229}], "download_size": 1375369739, "dataset_size": 1403099432.281}}
|
2023-05-02T21:42:09+00:00
|
8b84431610aa97c3ddc5426dbf1d361bddd69907
|
# Dataset Card for "truthful_qa_mc_pt"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_pt
|
[
"region:us"
] |
2023-05-02T20:24:44+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 106899.5, "num_examples": 342}, {"name": "validation", "num_bytes": 106899.5, "num_examples": 342}], "download_size": 113452, "dataset_size": 213799.0}}
|
2023-05-04T16:16:30+00:00
|
00713bf837679fed586b8b8e6025550851f6d3d4
|
# Dataset Card for "truthful_qa_mc_zh"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_zh
|
[
"region:us"
] |
2023-05-02T20:29:02+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 95395.0, "num_examples": 342}, {"name": "validation", "num_bytes": 95395.0, "num_examples": 342}], "download_size": 104268, "dataset_size": 190790.0}}
|
2023-05-04T16:20:00+00:00
|
b1ef06832c488f7cd0efe2e716c68ffe08bbe71b
|
# Dataset Card for "truthful_qa_mc_es"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_es
|
[
"region:us"
] |
2023-05-02T20:34:17+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 54060.5, "num_examples": 171}, {"name": "validation", "num_bytes": 54060.5, "num_examples": 171}], "download_size": 61484, "dataset_size": 108121.0}}
|
2023-05-04T16:18:36+00:00
|
63536514a9abfe7bf8d248dbbe0c080481f76bb7
|
# Dataset Card for Peewee Issues
## Dataset Summary
Peewee Issues is a dataset containing all the issues in the [Peewee GitHub repository](https://github.com/coleifer/peewee) up to the last date of extraction (5/3/2023). It was made with educational purposes in mind (specifically, to get me used to using Hugging Face's datasets), but can be used for multi-label classification or semantic search. The contents are all in English and concern SQL databases and ORM libraries.
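As a rough usage sketch (not part of the original card), the issues can be loaded and their GitHub label names inspected for multi-label work; the feature names below are taken from the dataset metadata:
```python
from datasets import load_dataset

# Load the scraped Peewee issues (single train split per the dataset metadata)
issues = load_dataset("akumoth/peewee-issues", split="train")
first = issues[0]
# Each issue carries its GitHub labels as a list of structs with a "name" field
print(first["title"], [label["name"] for label in first["labels"]])
```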
|
akumoth/peewee-issues
|
[
"task_categories:text-classification",
"task_categories:feature-extraction",
"task_ids:topic-classification",
"task_ids:multi-label-classification",
"annotations_creators:found",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:n<1K",
"source_datasets:original",
"language:en",
"license:mit",
"peewee",
"python",
"github",
"issues",
"region:us"
] |
2023-05-02T20:35:17+00:00
|
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-classification", "feature-extraction"], "task_ids": ["topic-classification", "multi-label-classification"], "pretty_name": "Peewee Github Issues", "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "null"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "dtype": "null"}, {"name": "assignees", "sequence": "null"}, {"name": "milestone", "dtype": "null"}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "closed_at", "dtype": "timestamp[s]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "string"}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}], "splits": [{"name": "train", "num_bytes": 9990717, "num_examples": 2814}], "download_size": 3607838, "dataset_size": 9990717}, "tags": ["peewee", "python", "github", "issues"]}
|
2023-05-03T14:53:06+00:00
|
5053d269dd5ff7351dac369b48171806366b0359
|
# Dataset Card for H4 Code Evaluation Prompts
This is a filtered set of prompts for evaluating code instruction models.
It will contain a variety of languages and task types.
Currently, we used ChatGPT (GPT-3.5-turbo) to generate these, so we encourage using them only for qualitative evaluation and not for training your models.
The generation of this data is similar to something like [CodeAlpaca](https://github.com/sahil280114/codealpaca#data-generation-process), which you can download [here](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k), but we intend to make these tasks both
a) more challenging, and
b) more curated.
These two things hopefully give a meaningful evaluation, but this is not enough data to train an entire model.
The data corresponds to the following:
* 20 simple python instruction following,
* 20 intermediate python instruction following,
* 10 advanced python instruction following,
* 15 python machine learning questions,
* 20 C++ instruction following,
* 10 html instruction following,
* 20 misc language code feedback questions.
Or, on a per language basis:
* Python: 81
* C++: 21
* html: 10
* Ruby: 1
* Bash: 1
* MATLAB: 1
* React: 1
* Scala: 1
* JavaScript: 1
* Java: 1
* PHP: 1
Or, per instruction type:
* Code completion / instruction following: 95
* Bug fixing: 20
To get the current information on the tasks, you can use the following snippet:
```
from datasets import load_dataset
d = load_dataset("HuggingFaceH4/code_evaluation_prompts")
language_list = d['train']['language']
language_count = {ele:language_list.count(ele) for ele in language_list}
```
Similar code can be run for the type of instruction (code generation vs. bug advice).
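For example, a minimal variant of the snippet above for the `type` column (the column name is taken from the dataset features) might look like:
```python
from datasets import load_dataset

d = load_dataset("HuggingFaceH4/code_evaluation_prompts")
# Count prompts per instruction type (e.g. code generation vs. bug advice)
type_list = d["train"]["type"]
type_count = {ele: type_list.count(ele) for ele in type_list}
print(type_count)
```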
Interested in contributing? Open a PR with a specific language and question content.
Here are the ChatGPT prompts used to initiate the responses (which are then filtered), May 3rd 2023 version:
* Generate a bunch of instructions for coding questions in python (in the format of {"prompt": instruction})
* These have been useful, can you generate the last few that are the hardest and most Pythonic that you can think of?
* Taking a step back, can you generate 20 for me that don't need to be as hard, but are machine learning focused (e.g. a mix of PyTorch and Jax).
* Generate a bunch of instructions for coding questions in C++ (in the format of {"prompt": instruction})
* Can you generate 5 examples of instructions, with the same format {"prompt": text}, where the instruction has a piece of code with a bug, and you're asking for feedback on your code as if you wrote it?
|
HuggingFaceH4/code_evaluation_prompts
|
[
"size_categories:n<1K",
"language:en",
"code",
"rlhf",
"region:us"
] |
2023-05-02T20:38:49+00:00
|
{"language": ["en"], "size_categories": ["n<1K"], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "bug", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "meta", "struct": [{"name": "id", "dtype": "int64"}, {"name": "source", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 30363, "num_examples": 115}], "download_size": 15622, "dataset_size": 30363}, "tags": ["code", "rlhf"]}
|
2023-05-08T23:35:31+00:00
|
3eee986a50154ee635bda3497deb8189e544edfc
|
# Dataset Card for "Orion-L-C3DEDUPE-PREPROCESSED"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Dampish/Orion-L-C3DEDUPE-PREPROCESSED
|
[
"region:us"
] |
2023-05-02T20:44:01+00:00
|
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 20773525124, "num_examples": 1949206}], "download_size": 4199445646, "dataset_size": 20773525124}}
|
2023-05-02T20:49:19+00:00
|
d4d7e22e18b88fcf3eb847d017d12c162457d211
|
# Dataset Card for "imdb_ar"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/imdb_ar
|
[
"region:us"
] |
2023-05-02T20:45:38+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}], "splits": [{"name": "train", "num_bytes": 15786709, "num_examples": 25000}, {"name": "test", "num_bytes": 15776689, "num_examples": 25000}], "download_size": 16241794, "dataset_size": 31563398}}
|
2023-05-02T20:45:42+00:00
|
c956e20741594fbec1bb894dd99fa779dc4d1870
|
# Dataset Card for "Orion-Eval"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Dampish/Orion-Eval
|
[
"region:us"
] |
2023-05-02T20:49:19+00:00
|
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 3769602, "num_examples": 350}], "download_size": 757991, "dataset_size": 3769602}}
|
2023-05-02T20:49:22+00:00
|
6c8624e425aaae64c82227d2cd36eb7d34ec9aa6
|
# Dataset Card for "ffhq_controlnet_5_2_23"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cr7Por/ffhq_controlnet_5_2_23
|
[
"region:us"
] |
2023-05-02T20:50:50+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_crop", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 15826187729.91, "num_examples": 39641}], "download_size": 15842739047, "dataset_size": 15826187729.91}}
|
2023-05-02T22:27:54+00:00
|
ba661c7841277974d05232186fc5ad81c2bf6459
|
# Dataset Card for "truthful_qa_mc_it"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_it
|
[
"region:us"
] |
2023-05-02T20:53:17+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 215135, "num_examples": 684}], "download_size": 111031, "dataset_size": 215135}}
|
2023-05-03T05:40:00+00:00
|
21e4e9cffad3a688030022a96c826c00d57402fc
|
# Dataset Card for "imdb_zh"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/imdb_zh
|
[
"region:us"
] |
2023-05-02T20:57:10+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}], "splits": [{"name": "train", "num_bytes": 18760648, "num_examples": 25000}, {"name": "test", "num_bytes": 18574771, "num_examples": 25000}], "download_size": 23908717, "dataset_size": 37335419}}
|
2023-05-02T20:57:15+00:00
|
7afa8ec2f2023339245395e9a9f14aee3b2ee68f
|
# Dataset Card for "iemocap_audio_text"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Ar4ikov/iemocap_audio_text
|
[
"region:us"
] |
2023-05-02T21:00:16+00:00
|
{"dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "activation", "dtype": "float64"}, {"name": "dominance", "dtype": "float64"}, {"name": "emotion", "dtype": "string"}, {"name": "end_time", "dtype": "float64"}, {"name": "start_time", "dtype": "float64"}, {"name": "titre", "dtype": "string"}, {"name": "to_translate", "dtype": "string"}, {"name": "translated", "dtype": "string"}, {"name": "valence", "dtype": "float64"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 1479523969.375, "num_examples": 10039}], "download_size": 1405738430, "dataset_size": 1479523969.375}}
|
2023-05-02T21:25:17+00:00
|
ad7011ebb8fe1041b53997b92745281171d81166
|
# Dataset Card for "Translation"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Hansollll/Translation
|
[
"region:us"
] |
2023-05-02T21:18:22+00:00
|
{"dataset_info": {"features": [{"name": "sn", "dtype": "string"}, {"name": "translation", "struct": [{"name": "en", "dtype": "string"}, {"name": "ko", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 2460095.2, "num_examples": 8000}, {"name": "test", "num_bytes": 615023.8, "num_examples": 2000}], "download_size": 1973746, "dataset_size": 3075119.0}}
|
2023-05-02T21:18:45+00:00
|
8193c9263cb5b413c723fd2a73b8ae17ab09557b
|
# Dataset Card for "dolly_hhrlhf"
This dataset is a combination of [Databricks' dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) dataset and a filtered subset of [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf). It also includes a test split, which was missing in the original `dolly` set. That test set is composed of 200 randomly selected samples from `dolly` + 4,929 of the test set samples from HH-RLHF which made it through the filtering process. The train set contains 59,310 samples; `15,014 - 200 = 14,814` from Dolly, and the remaining 44,496 from HH-RLHF.
It is slightly larger than Alpaca and, in our experience, of slightly higher quality, but it is usable for commercial purposes so long as you follow the terms of the license.
## Filtering process
As mentioned, the HH-RLHF data in this dataset is filtered. Specifically, we take the first turn of the conversation, then remove any samples where the assistant:
- uses the word "human", "thank", or "sorry"
- asks a question
- uses a first person pronoun
This leaves samples which look like instruction-following, as opposed to conversation.
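A rough sketch of that filter, assuming simple substring and word matching (the exact pronoun list and matching rules are not given in this card, so this is only an approximation):
```python
import re

# Hypothetical approximation of the filtering heuristic described above
FIRST_PERSON = {"i", "i'm", "i've", "i'd", "i'll", "me", "my", "mine", "we", "our", "us"}

def keep_sample(assistant_reply: str) -> bool:
    """Return True if the reply looks like instruction-following rather than conversation."""
    text = assistant_reply.lower()
    if any(word in text for word in ("human", "thank", "sorry")):
        return False
    if "?" in text:  # the assistant asks a question
        return False
    words = set(re.findall(r"[a-z']+", text))
    if words & FIRST_PERSON:  # first-person pronouns
        return False
    return True
```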
## License/Attribution
<!--
**Copyright (2023) MosaicML, Inc.**
-->
This dataset was developed at MosaicML (https://www.mosaicml.com) and its use is subject to the CC BY-SA 3.0 license.
Certain categories of material in the dataset include materials from the following sources, licensed under the CC BY-SA 3.0 license:
Wikipedia (various pages) - https://www.wikipedia.org/
Copyright © Wikipedia editors and contributors.
Databricks (https://www.databricks.com)
Copyright © Databricks
When citing this dataset, please use the following:
```
@misc{mosaicml2023dolly_hhrlhf,
author = {MosaicML},
title = {Dolly-HHRLHF Dataset},
year = {2023},
publisher = {HuggingFace Datasets},
howpublished = {https://huggingface.co/datasets/mosaicml/dolly_hhrlhf},
}
```
|
mosaicml/dolly_hhrlhf
|
[
"task_categories:text-generation",
"language:en",
"license:cc-by-sa-3.0",
"region:us"
] |
2023-05-02T21:27:06+00:00
|
{"language": ["en"], "license": "cc-by-sa-3.0", "task_categories": ["text-generation"], "pretty_name": "Dolly HH-RLHF", "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43781455.002688624, "num_examples": 59310}, {"name": "test", "num_bytes": 4479286.805304853, "num_examples": 5129}], "download_size": 24882010, "dataset_size": 48260741.80799348}}
|
2023-10-02T14:48:48+00:00
|
d5e661173af158f68ed4c037a89a29be57b17422
|
# Dataset Card for "isic2018_10"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
researchjyotsna/isic2018_10
|
[
"region:us"
] |
2023-05-02T21:51:55+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2553258.0, "num_examples": 10}], "download_size": 2553780, "dataset_size": 2553258.0}}
|
2023-05-02T21:51:58+00:00
|
97ae49c1f9d4608eccd99532e326cd8c907edcf9
|
# Dataset Card for "imdb_it"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/imdb_it
|
[
"region:us"
] |
2023-05-02T21:59:02+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}], "splits": [{"name": "train", "num_bytes": 11835522, "num_examples": 25000}, {"name": "test", "num_bytes": 11799518, "num_examples": 25000}], "download_size": 13580773, "dataset_size": 23635040}}
|
2023-05-03T07:46:12+00:00
|
d8e8dafbe4ecd76c188c41e4701fb7014ef78ee7
|
# Dataset Card for "chain-of-thoughts-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/chain-of-thoughts-chatml
|
[
"region:us"
] |
2023-05-02T22:18:20+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 32696810, "num_examples": 74771}], "download_size": 12852096, "dataset_size": 32696810}}
|
2023-06-05T22:13:19+00:00
|
70f9d1e5e1a697fe35830875cfc7de1dd590d727
|
# LLaVA Visual Instruct Pretrain Dataset Card
## Dataset details
**Dataset type:**
LLaVA Visual Instruct Pretrain LCS-558K is a subset of the LAION/CC/SBU dataset, filtered with a more balanced concept coverage distribution.
Captions are also associated with [BLIP synthetic caption](https://github.com/salesforce/BLIP#pre-training-datasets-download) for reference.
It is constructed for the pretraining stage for feature alignment in visual instruction tuning.
We aim to build large multimodal models towards GPT-4 vision/language capability.
**Dataset date:**
LLaVA Visual Instruct CC3M Pretrain 595K was created in May 2023.
**Dataset structure:**
- `blip_laion_cc_sbu_558k.json` contains the multimodal synthesized conversation from the image-caption pairs, by adding randomly selected instructions like: "Describe this image". It is used for pretraining in LLaVA. We use the raw CC-3M caption as the default answer.
- `blip_laion_cc_sbu_558k_meta.json` contains the metadata for each image: the image file name, image URL, and synthetic BLIP caption (a small inspection sketch follows this list).
- `images.zip` contains all raw images of the filtered subset from LAION/CC/SBU. Important notice: upon request from the community, as ~15% of the images in the original LAION/CC/SBU dataset are no longer accessible, we upload `images.zip` so that our work can be better reproduced by the research community. It should not be used for any other purpose. The use of these images must comply with the LAION/CC/SBU license. This may be taken down when requested by the original LAION/CC/SBU dataset owner or owners of the referenced images.
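As a minimal sketch of how the caption/metadata files might be inspected after download (assuming the meta file is a single JSON array, which the card does not state explicitly):
```python
import json

# Inspect the BLIP caption metadata described above
with open("blip_laion_cc_sbu_558k_meta.json") as f:
    meta = json.load(f)

print(len(meta), "entries")
print(meta[0])  # field names inside each entry are not documented here
```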
**Paper or resources for more information:**
https://llava-vl.github.io/
**License:**
Must comply with license of [CC-3M](https://github.com/google-research-datasets/conceptual-captions/blob/master/LICENSE), [BLIP](https://github.com/salesforce/BLIP/blob/main/LICENSE.txt) (if you use their synthetic caption).
CC-3M
The dataset may be freely used for any purpose, although acknowledgement of
Google LLC ("Google") as the data source would be appreciated. The dataset is
provided "AS IS" without any warranty, express or implied. Google disclaims all
liability for any damages, direct or indirect, resulting from the use of the
dataset.
**Where to send questions or comments about the model:**
https://github.com/haotian-liu/LLaVA/issues
## Intended use
**Primary intended uses:**
The primary use of LLaVA is research on large multimodal models and chatbots.
**Primary intended users:**
The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence.
|
liuhaotian/LLaVA-Pretrain
|
[
"language:en",
"license:other",
"region:us"
] |
2023-05-02T22:55:26+00:00
|
{"language": ["en"], "license": "other", "pretty_name": "LLaVA Pretrain"}
|
2023-07-06T07:47:38+00:00
|
a11edd52df104054a492ea362ebd36aa2ba15393
|
# Dataset Card for "ak_edit_issue_analysis_128_v2_with_zl-reward"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/ak_edit_issue_analysis_128_v2_with_zl-reward
|
[
"region:us"
] |
2023-05-02T22:57:00+00:00
|
{"dataset_info": {"features": [{"name": "input_text", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "output_text", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "ak-edit-finetuned-triton-v0", "dtype": "string"}, {"name": "ak-edit-finetuned-triton-v1", "dtype": "string"}, {"name": "ak-edit-finetuned-triton-v2", "dtype": "string"}, {"name": "ak-0324-sft-no-reward", "dtype": "string"}, {"name": "edit-sft-gptj-distil-v1-ak-test", "dtype": "string"}, {"name": "continue_50m__edit-sft-gptj-distil-v1-ak-test", "dtype": "float64"}, {"name": "retry_12m__edit-sft-gptj-distil-v1-ak-test", "dtype": "float64"}, {"name": "stars_2m__edit-sft-gptj-distil-v1-ak-test", "dtype": "float64"}, {"name": "retry_and_continue_12m__edit-sft-gptj-distil-v1-ak-test", "dtype": "float64"}, {"name": "continue_50m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v0", "dtype": "float64"}, {"name": "continue_50m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v1", "dtype": "float64"}, {"name": "continue_50m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "retry_12m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "stars_2m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-edit-finetuned-triton-v2", "dtype": "float64"}, {"name": "continue_50m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "retry_12m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "stars_2m__ak-0324-sft-no-reward", "dtype": "float64"}, {"name": "retry_and_continue_12m__ak-0324-sft-no-reward", "dtype": "float64"}], "splits": [{"name": "completion_issue", "num_bytes": 29229612, "num_examples": 9647}, {"name": "garbage_issue", "num_bytes": 10185442, "num_examples": 3565}, {"name": "gender_issue", "num_bytes": 15038448, "num_examples": 4405}], "download_size": 27688286, "dataset_size": 54453502}}
|
2023-05-02T22:57:06+00:00
|
a819b714c690ff89c88ccc32115f8e5e2bdd322a
|
# Dataset Card for "instinwild-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/instinwild-chatml
|
[
"region:us"
] |
2023-05-02T23:06:36+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 74114357, "num_examples": 52191}], "download_size": 39478067, "dataset_size": 74114357}}
|
2023-06-07T15:40:30+00:00
|
08f657c7b1818fc58f42ed42125287953b439645
|
This is PeerSum, a multi-document summarization dataset in the peer-review domain. More details can be found in the paper accepted at EMNLP 2023, [Summarizing Multiple Documents with Conversational Structure for Meta-review Generation](https://arxiv.org/abs/2305.01498). The original code and datasets are public on [GitHub](https://github.com/oaimli/PeerSum).
Please use the following code to download the dataset with the datasets library from Huggingface.
```python
from datasets import load_dataset
peersum_all = load_dataset('oaimli/PeerSum', split='all')
peersum_train = peersum_all.filter(lambda s: s['label'] == 'train')
peersum_val = peersum_all.filter(lambda s: s['label'] == 'val')
peersum_test = peersum_all.filter(lambda s: s['label'] == 'test')
```
The Huggingface dataset is mainly for multi-document summarization. Each sample comprises information with the following keys:
```
* paper_id: str (a link to the raw data)
* paper_title: str
* paper_abstract: str
* paper_acceptance: str
* meta_review: str
* review_ids: list(str)
* review_writers: list(str)
* review_contents: list(str)
* review_ratings: list(int)
* review_confidences: list(int)
* review_reply_tos: list(str)
* label: str (train, val, test)
```
You can also download the raw data from [Google Drive](https://drive.google.com/drive/folders/1SGYvxY1vOZF2MpDn3B-apdWHCIfpN2uB?usp=sharing). The raw data comprises more information and can be used for other analyses of peer reviews.
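As an illustration, one way to turn a sample into a source/target pair for multi-document summarization (the `|||||` separator is an arbitrary choice, not part of the dataset; field names are taken from the key list above):
```python
from datasets import load_dataset

peersum_all = load_dataset("oaimli/PeerSum", split="all")
peersum_train = peersum_all.filter(lambda s: s["label"] == "train")

# Concatenate the reviews as the multi-document input; the meta-review is the target
sample = peersum_train[0]
source = " ||||| ".join(sample["review_contents"])
target = sample["meta_review"]
print(source[:300])
print("->", target[:300])
```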
|
oaimli/PeerSum
|
[
"task_categories:summarization",
"size_categories:10K<n<100K",
"language:en",
"license:apache-2.0",
"arxiv:2305.01498",
"region:us"
] |
2023-05-02T23:12:05+00:00
|
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["summarization"], "pretty_name": "PeerSum"}
|
2023-10-08T04:31:38+00:00
|
b8652ecb3af5b4c2f9fb2897e709809467b6a1e5
|
# Dataset Card for "camel-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/camel-chatml
|
[
"region:us"
] |
2023-05-02T23:28:57+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 226701847.0, "num_examples": 110000}], "download_size": 106777582, "dataset_size": 226701847.0}}
|
2023-06-05T22:16:36+00:00
|
8a8954bb64eede476199f471f063488ad6471c01
|
nateraw/spaces-monitoring
|
[
"license:mit",
"region:us"
] |
2023-05-02T23:29:57+00:00
|
{"license": "mit"}
|
2023-06-15T13:08:21+00:00
|
|
a122388b996d81e28bbc1879c909279c6c7f0862
|
Arris/twitter-the-algorithm-faiss
|
[
"license:mit",
"region:us"
] |
2023-05-02T23:41:08+00:00
|
{"license": "mit"}
|
2023-05-02T23:53:55+00:00
|
|
6e04dff3ae63a16cc3e13125714bd9ab52a5981a
|
# Dataset Card for "ag_news_fr"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/ag_news_fr
|
[
"region:us"
] |
2023-05-02T23:43:45+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "World", "1": "Sports", "2": "Business", "3": "Sci/Tech"}}}}], "splits": [{"name": "train", "num_bytes": 30871400, "num_examples": 120000}, {"name": "test", "num_bytes": 1945500, "num_examples": 7600}], "download_size": 19675012, "dataset_size": 32816900}}
|
2023-05-03T02:50:19+00:00
|
ed7851a0b5503a7a35d43af78eead3ce1a84cdb8
|
# Dataset Card for "ag_news_zh"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/ag_news_zh
|
[
"region:us"
] |
2023-05-02T23:44:21+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "World", "1": "Sports", "2": "Business", "3": "Sci/Tech"}}}}], "splits": [{"name": "train", "num_bytes": 23042282, "num_examples": 120000}, {"name": "test", "num_bytes": 1458254, "num_examples": 7600}], "download_size": 16791683, "dataset_size": 24500536}}
|
2023-05-03T03:44:50+00:00
|
95f22307ccc1635a897e4982a5b3c42abe99a6df
|
# Dataset Card for "GeoTrainr_data"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
icheng3/GeoTrainr_data
|
[
"region:us"
] |
2023-05-02T23:51:08+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Albania", "1": "Andorra", "2": "Austria", "3": "Belarus", "4": "Belgium", "5": "Bosnia and Herzegovina", "6": "Bulgaria", "7": "Croatia", "8": "Cyprus", "9": "Czechia", "10": "Denmark", "11": "Estonia", "12": "Finland", "13": "France", "14": "Germany", "15": "Greece", "16": "Holy See", "17": "Hungary", "18": "Iceland", "19": "Ireland", "20": "Italy", "21": "Kosovo", "22": "Latvia", "23": "Liechtenstein", "24": "Lithuania", "25": "Luxembourg", "26": "Malta", "27": "Moldova, Republic of", "28": "Monaco", "29": "Montenegro", "30": "Netherlands", "31": "North Macedonia", "32": "Norway", "33": "Poland", "34": "Portugal", "35": "Romania", "36": "Russian Federation", "37": "San Marino", "38": "Serbia", "39": "Slovakia", "40": "Slovenia", "41": "Spain", "42": "Sweden", "43": "Switzerland", "44": "Ukraine", "45": "United Kingdom of Great Britain and Northern Ireland"}}}}], "splits": [{"name": "train", "num_bytes": 266385473.045, "num_examples": 16465}, {"name": "test", "num_bytes": 32803712.1, "num_examples": 2058}, {"name": "validation", "num_bytes": 33005454.167, "num_examples": 2059}], "download_size": 329502054, "dataset_size": 332194639.312}}
|
2023-05-03T00:09:50+00:00
|
1c80f1af00e2786966a8d8d37568b6f523dc2371
|
# Dataset Card for "ag_news_ar"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
christykoh/ag_news_ar
|
[
"region:us"
] |
2023-05-02T23:55:10+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "World", "1": "Sports", "2": "Business", "3": "Sci/Tech"}}}}], "splits": [{"name": "train", "num_bytes": 41887724, "num_examples": 120000}, {"name": "test", "num_bytes": 2650172, "num_examples": 7600}], "download_size": 23151161, "dataset_size": 44537896}}
|
2023-05-03T03:44:12+00:00
|
0650b509aab972cd9a7e941d5fa56309f19ab0bb
|
A port of [miniF2F](https://github.com/facebookresearch/miniF2F) to Lean 4.
|
hoskinson-center/minif2f-lean4
|
[
"license:mit",
"region:us"
] |
2023-05-03T00:27:24+00:00
|
{"license": "mit"}
|
2023-05-03T20:20:52+00:00
|
efa939b77957e96d5e1f96e1ac611e73245ba37e
|
# Dataset Card for "thesis-raw-dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
UchihaMadara/thesis-raw-dataset
|
[
"region:us"
] |
2023-05-03T00:34:35+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sentiments", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 367502, "num_examples": 3043}, {"name": "test", "num_bytes": 98465, "num_examples": 800}], "download_size": 208422, "dataset_size": 465967}}
|
2023-05-03T00:34:38+00:00
|
ea077dbeb5715e7765308220c1d8c6e7d36cfd52
|
# Dataset Card for "fake-w2-us-tax-form-dataset"
This is a dataset of synthetically generated US Tax Return W2 Forms, with generated fake data such as names, IDs, dates and addresses. Only real cities, states and zip codes have been used.
This dataset is created from the existing public [Fake W-2 (US Tax Form) Dataset](https://www.kaggle.com/datasets/mcvishnu1/fake-w2-us-tax-form-dataset) for use with 🤗.
|
singhsays/fake-w2-us-tax-form-dataset
|
[
"task_categories:table-to-text",
"size_categories:1K<n<10K",
"language:en",
"finance",
"region:us"
] |
2023-05-03T02:23:18+00:00
|
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["table-to-text"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 280673107.2, "num_examples": 1800}, {"name": "test", "num_bytes": 15578492.9, "num_examples": 100}, {"name": "validation", "num_bytes": 15593664.9, "num_examples": 100}], "download_size": 309564410, "dataset_size": 311845264.99999994}, "tags": ["finance"]}
|
2023-06-06T02:51:23+00:00
|
7a9ac1dea906bdacd1f5f3310adf98ac190150b0
|
# Dataset Card for "medmcqa-rule-neg-prepend"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/medmcqa-rule-neg-prepend
|
[
"region:us"
] |
2023-05-03T02:54:24+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "opa", "dtype": "string"}, {"name": "opb", "dtype": "string"}, {"name": "opc", "dtype": "string"}, {"name": "opd", "dtype": "string"}, {"name": "cop", "dtype": {"class_label": {"names": {"0": "a", "1": "b", "2": "c", "3": "d"}}}}, {"name": "choice_type", "dtype": "string"}, {"name": "exp", "dtype": "string"}, {"name": "subject_name", "dtype": "string"}, {"name": "topic_name", "dtype": "string"}, {"name": "neg_prompt", "dtype": "string"}, {"name": "neg_answer", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 3084420, "num_examples": 4183}], "download_size": 2008481, "dataset_size": 3084420}}
|
2023-05-18T03:21:10+00:00
|
589529341284594c3e9a5fe158da02f4bdc752d2
|
gabrielwu/genealogy_synthetic
|
[
"license:unknown",
"region:us"
] |
2023-05-03T03:49:34+00:00
|
{"license": "unknown"}
|
2023-05-03T21:16:56+00:00
|
|
e972d55bcd058b4cf7bad50b7b108d78e6071d19
|
KyonBS/hibinoMina-TakagiSan
|
[
"license:openrail",
"region:us"
] |
2023-05-03T03:49:42+00:00
|
{"license": "openrail"}
|
2023-05-03T03:50:13+00:00
|
|
bab82a2ebdc750a0134ddcd0d5813867b92eed2a
|
Corresponding GitHub repo can be found here:
https://github.com/leap-stc/ClimSim
Read more: https://arxiv.org/abs/2306.08754.
|
LEAP/ClimSim_low-res
|
[
"license:cc-by-4.0",
"arxiv:2306.08754",
"doi:10.57967/hf/0740",
"region:us"
] |
2023-05-03T03:51:53+00:00
|
{"license": "cc-by-4.0"}
|
2023-09-29T19:31:55+00:00
|
66061e8c5818134bbd07288f30b62f07174f3f31
|
mrnajkh/ps_pipcache
|
[
"license:unknown",
"region:us"
] |
2023-05-03T03:54:08+00:00
|
{"license": "unknown"}
|
2023-05-03T03:59:06+00:00
|
|
00d00c29aa891ab874def7667023d686c68e5d71
|
# Dataset Card for "deepfashion"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
lirus18/deepfashion
|
[
"region:us"
] |
2023-05-03T04:53:29+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "openpose", "dtype": "image"}, {"name": "cloth", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3491233280.847, "num_examples": 13679}], "download_size": 3401862421, "dataset_size": 3491233280.847}}
|
2023-05-03T05:06:22+00:00
|
e084169f08e97c59121bfba0c27013621367fa6f
|
AIARTCHAN/storage
|
[
"license:creativeml-openrail-m",
"region:us"
] |
2023-05-03T05:05:44+00:00
|
{"license": "creativeml-openrail-m"}
|
2023-12-20T11:24:06+00:00
|
|
d7f23dbd0af2d21a4aa9c990fe50b0ba64090a98
|
# Dataset Card for "boolq_it"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/boolq_it
|
[
"region:us"
] |
2023-05-03T06:06:19+00:00
|
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "passage", "dtype": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 4638178, "num_examples": 9427}, {"name": "validation", "num_bytes": 1595818, "num_examples": 3270}], "download_size": 3883702, "dataset_size": 6233996}}
|
2023-08-18T22:14:14+00:00
|
36436671183b5428eac449c07aa4d2a5b46511fb
|
Dhika/defect
|
[
"license:unknown",
"region:us"
] |
2023-05-03T06:06:33+00:00
|
{"license": "unknown"}
|
2023-05-05T01:59:41+00:00
|
|
40da724667ce0b37d4dda07946eb983c14689236
|
Brain Tumor Detection | Vision Transformer 99%
Click -> [Kaggle](https://www.kaggle.com/code/miladfa7/brain-tumor-detection-vision-transformer-99)
---
task_categories:
- image-classification
- image-segmentation
tags:
- 'brain '
- MRI
- brain-MRI-images
- Tumor
---
|
miladfa7/Brain-MRI-Images-for-Brain-Tumor-Detection
|
[
"region:us"
] |
2023-05-03T06:11:39+00:00
|
{}
|
2023-05-16T16:11:04+00:00
|
64b624f4bb5394d42dda677d9afe372df4087075
|
desiai/samachaar
|
[
"license:odc-by",
"region:us"
] |
2023-05-03T06:30:04+00:00
|
{"license": "odc-by"}
|
2023-11-15T16:25:08+00:00
|
|
cca60f5b0c7f3250ccc27293e1cd4180bcffb78a
|
# Dataset Card for NLP4SGPapers
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Repository:** [NLP4SG](https://github.com/feradauto/nlp4sg)
- **Paper:**
- **Point of Contact:** [Zhijing Jin](mailto:[email protected]), [Fernando Gonzalez](mailto:[email protected])
### Dataset Summary
Scientific dataset with three associated tasks that can help identify NLP4SG papers.
### Languages
The language in the dataset is English.
## Dataset Structure
### Data Instances
Each instance is an annotated paper with title, abstract, year.
### Data Fields
- `ID`: Paper ID in ACL Anthology
- `url`: URL where the paper is available
- `title`: Title of the paper
- `abstract`: Abstract
- `label_nlp4sg`: Whether the paper is an NLP4SG paper or not. For more info on the criteria, check our paper
- `task`: List of tasks (Only available for the test set and for SG papers)
- `method`: List of methods (Only available for the test set and for SG papers)
- `goal1`: goal in string format
- `goal2`: goal in string format
- `goal3`: goal in string format
- `acknowledgments`: acknowledgments
- `year`: Year of publication
- `sdg1` to `sdg17`: Boolean values that indicate whether the paper addresses the corresponding United Nations Sustainable Development Goal.
### Data Splits
NLP4SGPapers contains train, test and validation splits.
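As a small usage sketch (assuming `label_nlp4sg` is stored as a boolean, which matches its description above):
```python
from datasets import load_dataset

# Keep only the papers labeled as NLP4SG in the training split
papers = load_dataset("feradauto/NLP4SGPapers", split="train")
nlp4sg = papers.filter(lambda x: x["label_nlp4sg"])
print(len(nlp4sg), "of", len(papers), "training papers are labeled NLP4SG")
```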
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
Information about the data collection can be found in the appendix of [our paper].
### Personal and Sensitive Information
The NLP4SGPapers dataset does not have privacy concerns.
## Considerations for Using the Data
### Social Impact of Dataset
The intended use of this work is to help the creation of an overview of the NLP4SG research landscape.
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
The NLP4SGPapers dataset is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/).
### Citation Information
```
```
|
feradauto/NLP4SGPapers
|
[
"task_categories:text-classification",
"license:cc-by-nc-sa-4.0",
"region:us"
] |
2023-05-03T06:32:10+00:00
|
{"license": "cc-by-nc-sa-4.0", "task_categories": ["text-classification"], "pretty_name": "NLP4SGPapers"}
|
2023-05-03T16:37:12+00:00
|
be220f6c72d48b0f66c71d45ad51a6c08411a54b
|
# License Plates
Over **1.2 million** annotated license plates from vehicles around the world. This dataset is tailored for **License Plate Recognition tasks** and includes images from both YouTube and PlatesMania.
Annotation details are provided in the About section below.
# Get the dataset
### This is just an example of the data
Leave a request on [**https://trainingdata.pro/data-market**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=license_plates) to discuss your requirements, learn about the price and buy the dataset.
# About
## Variables in .csv files:
- **file_name** - filename of the original car photo
- **license_plate.country** - country where the vehicle was captured
- **bbox** - normalized Bounding Box labeling of the car
- **license_plate.visibility** - the visibility type of the license plate
- **license_plate.id** - unique license plate's id
- **license_plate.mask** - normalized coordinates of the license plate
- **license_plate.rows_count** - single-line or double-line number
- **license_plate.number** - recognized text of the license plate
- **license_plate.serial** - only for UAE numbers - license plate series
- **license_plate.region** - only for UAE numbers - license plate subregion
- **license_plate.color** - only for Saudi Arabia - color of the international plate code
**How it works**: *go to the folder for a given country; its CSV file contains all labeling information about the images located in the "photos" subfolder of that folder.*
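Alternatively, when loading the example data through the 🤗 Hub rather than the raw folders, one of the per-country configurations listed in the repository metadata can be used; this is only a sketch:
```python
from datasets import load_dataset

# Config names such as "Serbia_youtube" come from the repository's dataset_info;
# feature names match the variables listed above
plates = load_dataset("TrainingDataPro/license_plates", "Serbia_youtube", split="train")
row = plates[0]
print(row["license_plate.number"], row["license_plate.country"], row["bbox"])
```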
## [**TrainingData**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=license_plates) provides high-quality data annotation tailored to your needs
More datasets in TrainingData's Kaggle account: **https://www.kaggle.com/trainingdatapro/datasets**
TrainingData's GitHub: **https://github.com/Trainingdata-datamarket/TrainingData_All_datasets**
|
TrainingDataPro/license_plates
|
[
"task_categories:image-to-text",
"language:en",
"license:cc-by-nc-nd-4.0",
"finance",
"region:us"
] |
2023-05-03T06:38:20+00:00
|
{"language": ["en"], "license": "cc-by-nc-nd-4.0", "task_categories": ["image-to-text"], "tags": ["finance"], "dataset_info": [{"config_name": "Brazil_youtube", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 173536648, "num_examples": 72}], "download_size": 22606962, "dataset_size": 173536648}, {"config_name": "Estonia_platesmania", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7990452, "num_examples": 10}], "download_size": 7863164, "dataset_size": 7990452}, {"config_name": "Finland_platesmania", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9650579, "num_examples": 10}], "download_size": 9485725, "dataset_size": 9650579}, {"config_name": "Kazakhstan_platesmania", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14064541, "num_examples": 19}], "download_size": 7265915, "dataset_size": 14064541}, {"config_name": "Kazakhstan_youtube", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6324396, "num_examples": 22}], "download_size": 2852873, "dataset_size": 6324396}, {"config_name": "Lithuania_platesmania", "features": [{"name": "image", 
"dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8127614, "num_examples": 10}], "download_size": 7940839, "dataset_size": 8127614}, {"config_name": "Serbia_platesmania", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10000777, "num_examples": 10}], "download_size": 9808356, "dataset_size": 10000777}, {"config_name": "Serbia_youtube", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26535839, "num_examples": 67}], "download_size": 4044272, "dataset_size": 26535839}, {"config_name": "UAE_platesmania", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8236358, "num_examples": 10}], "download_size": 8028800, "dataset_size": 8236358}, {"config_name": "UAE_youtube", "features": [{"name": "image", "dtype": "image"}, {"name": "labeled_image", "dtype": "image"}, {"name": "bbox", "dtype": "string"}, {"name": "license_plate.id", "dtype": "string"}, {"name": "license_plate.visibility", "dtype": "string"}, {"name": "license_plate.rows_count", "dtype": "uint8"}, {"name": "license_plate.number", "dtype": "string"}, {"name": "license_plate.serial", "dtype": "string"}, {"name": "license_plate.country", "dtype": "string"}, {"name": "license_plate.mask", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 41202317, "num_examples": 162}], "download_size": 2666314, "dataset_size": 41202317}]}
|
2023-09-14T15:42:28+00:00
|
7242ec26b5eb08a6c31b537810c2bf897982a244
|
# bollywood-celebs
## Dataset Description
This dataset has been automatically processed by AutoTrain for project bollywood-celebs.
Credits: https://www.kaggle.com/datasets/sushilyadav1998/bollywood-celeb-localized-face-dataset
### Languages
The BCP-47 code for the dataset's language is unk.
## Dataset Structure
### Data Instances
A sample from this dataset looks as follows:
```json
[
{
"image": "<64x64 RGB PIL image>",
"target": 15
},
{
"image": "<64x64 RGB PIL image>",
"target": 82
}
]
```
### Dataset Fields
The dataset has the following fields (also called "features"):
```json
{
"image": "Image(decode=True, id=None)",
"target": "ClassLabel(names=['Aamir_Khan', 'Abhay_Deol', 'Abhishek_Bachchan', 'Aftab_Shivdasani', 'Aishwarya_Rai', 'Ajay_Devgn', 'Akshay_Kumar', 'Akshaye_Khanna', 'Alia_Bhatt', 'Ameesha_Patel', 'Amitabh_Bachchan', 'Amrita_Rao', 'Amy_Jackson', 'Anil_Kapoor', 'Anushka_Sharma', 'Anushka_Shetty', 'Arjun_Kapoor', 'Arjun_Rampal', 'Arshad_Warsi', 'Asin', 'Ayushmann_Khurrana', 'Bhumi_Pednekar', 'Bipasha_Basu', 'Bobby_Deol', 'Deepika_Padukone', 'Disha_Patani', 'Emraan_Hashmi', 'Esha_Gupta', 'Farhan_Akhtar', 'Govinda', 'Hrithik_Roshan', 'Huma_Qureshi', 'Ileana_DCruz', 'Irrfan_Khan', 'Jacqueline_Fernandez', 'John_Abraham', 'Juhi_Chawla', 'Kajal_Aggarwal', 'Kajol', 'Kangana_Ranaut', 'Kareena_Kapoor', 'Karisma_Kapoor', 'Kartik_Aaryan', 'Katrina_Kaif', 'Kiara_Advani', 'Kriti_Kharbanda', 'Kriti_Sanon', 'Kunal_Khemu', 'Lara_Dutta', 'Madhuri_Dixit', 'Manoj_Bajpayee', 'Mrunal_Thakur', 'Nana_Patekar', 'Nargis_Fakhri', 'Naseeruddin_Shah', 'Nushrat_Bharucha', 'Paresh_Rawal', 'Parineeti_Chopra', 'Pooja_Hegde', 'Prabhas', 'Prachi_Desai', 'Preity_Zinta', 'Priyanka_Chopra', 'R_Madhavan', 'Rajkummar_Rao', 'Ranbir_Kapoor', 'Randeep_Hooda', 'Rani_Mukerji', 'Ranveer_Singh', 'Richa_Chadda', 'Riteish_Deshmukh', 'Saif_Ali_Khan', 'Salman_Khan', 'Sanjay_Dutt', 'Sara_Ali_Khan', 'Shah_Rukh_Khan', 'Shahid_Kapoor', 'Shilpa_Shetty', 'Shraddha_Kapoor', 'Shreyas_Talpade', 'Shruti_Haasan', 'Sidharth_Malhotra', 'Sonakshi_Sinha', 'Sonam_Kapoor', 'Suniel_Shetty', 'Sunny_Deol', 'Sushant_Singh_Rajput', 'Taapsee_Pannu', 'Tabu', 'Tamannaah_Bhatia', 'Tiger_Shroff', 'Tusshar_Kapoor', 'Uday_Chopra', 'Vaani_Kapoor', 'Varun_Dhawan', 'Vicky_Kaushal', 'Vidya_Balan', 'Vivek_Oberoi', 'Yami_Gautam', 'Zareen_Khan'], id=None)"
}
```
### Dataset Splits
This dataset is split into a train and validation split. The split sizes are as follows:
| Split name | Num samples |
| ------------ | ------------------- |
| train | 6863 |
| valid | 1764 |
|
amitpuri/bollywood-celebs
|
[
"task_categories:image-classification",
"language:en",
"license:mit",
"region:us"
] |
2023-05-03T06:55:38+00:00
|
{"language": ["en"], "license": "mit", "task_categories": ["image-classification"], "pretty_name": " bollywood-celebs"}
|
2023-05-17T16:19:53+00:00
|
83b473866ae13c4cbf121dd5bdb150e314499653
|
DAPS Dataset
============
## Features Format
```json
{
'recording_environment': Value(dtype='string', id=None),
'speaker_id': Value(dtype='string', id=None),
'script_id': Value(dtype='string', id=None),
'clean_path': Value(dtype='string', id=None),
'produced_path': Value(dtype='string', id=None),
'device_path': Value(dtype='string', id=None),
'clean_audio': Audio(sampling_rate=44100, mono=True, decode=True, id=None),
'produced_audio': Audio(sampling_rate=44100, mono=True, decode=True, id=None),
'device_audio': Audio(sampling_rate=44100, mono=True, decode=True, id=None)
}
```
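A minimal loading sketch is shown below; the single `train` split name and the decoding behaviour are assumptions, so check the repository for the actual configuration.
```python
# Minimal sketch: load one sample and inspect its aligned audio versions.
# Assumption: the data is exposed as a single "train" split.
from datasets import load_dataset

daps = load_dataset("corvj/daps", split="train")
sample = daps[0]
print(sample["speaker_id"], sample["script_id"], sample["recording_environment"])

clean = sample["clean_audio"]     # decoded Audio feature: {"array", "sampling_rate", ...}
device = sample["device_audio"]
print(clean["sampling_rate"], len(clean["array"]))
print(device["sampling_rate"], len(device["array"]))
```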
Created By
----------
Gautham J. Mysore, Adobe Research
Description
-----------
The DAPS (Device and Produced Speech) dataset is a collection of aligned versions of professionally produced studio speech recordings and recordings of the same speech on common consumer devices (tablet and smartphone) in real-world environments. It has 15 versions of audio (3 professional versions and 12 consumer device/real-world environment combinations). Each version consists of about 4 1/2 hours of data (about 14 minutes from each of 20 speakers). Please see this paper for a detailed description of the dataset:
Gautham J. Mysore, “Can We Automatically Transform Speech Recorded on Common Consumer Devices in Real-World Environments into Professional Production Quality Speech? - A Dataset, Insights, and Challenges”, in the IEEE Signal Processing Letters, Vol. 22, No. 8, August 2015
The primary goal of the dataset is to help develop methods to automatically convert real-world device recordings into professional sounding recordings. It can be also used for various other applications like voice conversion, traditional speech enhancement, and automatic production of studio recordings.
Audio Files Included
--------------------
Each version contains 100 wave files - 20 speakers reading 5 scripts each (about 14 minutes of data per speaker). We provide a separate folder for each version.
-----
Description of Studio Recordings -
The versions are:
- cleanraw - Original clean studio recording, which includes speech as well as non-speech sounds such as breaths and mouth sounds.
- clean - A version of cleanraw with most of the non-speech sounds carefully removed by a professional sound engineer.
- produced - A version of clean with effects and processing such as EQ and compression applied by the same sound engineer. This is the final studio version.
The file naming convention is:
speaker_script_version.wav
For example, f2_script4_produced.wav is the professionally produced version of the second female speaker reading the fourth script.
-----
Description of Device Recordings -
Devices:
- ipad - An iPad Air was placed on a stand to simulate a person holding it. This recording was done in all environments.
- ipadflat - An iPad Air was placed flat on a table. This recording was done in two environments.
- iphone - An iPhone 5S was placed on a stand to simulate a person holding it. This recording was done in three environments.
Environments:
- office1 - more reverberant office
- office2 - less reverberant office
- confroom1 - smaller conference room
- confroom2 - larger conference room
- livingroom1 - relatively reverberant living room with occasional traffic noise from outside
- bedroom1 - bedroom with occasional traffic noise from outside
- balcony1 - balcony with heavy traffic noise (this was used as a stress test and has significantly higher noise than all other environments)
The file naming convention is:
speaker_script_device_room.wav
For example, m5_script1_ipad_office1.wav is the iPad recording in the first office of the fifth male speaker reading the first script.
-----
Sample files -
In the sample folder, we provide all versions of a single speaker reading a single script. It is a quick way to listen to the differences between versions. We also provide an Adobe Audition session file with all sample files for convenience.
Additional Files Included
------------------------
The folder called supplementary_files contains the actual scripts read by the speakers as well as a set of Matlab files to assist in creating new device recordings.
Please Acknowledge DAPS in Academic Research
----------------------------------------------------
When DAPS is used for academic research, we would highly appreciate it if scientific publications of works partly based on the DAPS dataset cite the following publication:
Gautham J. Mysore, “Can We Automatically Transform Speech Recorded on Common Consumer Devices in Real-World Environments into Professional Production Quality Speech? - A Dataset, Insights, and Challenges”, in the IEEE Signal Processing Letters, Vol. 22, No. 8, August 2015
License
----------------------------------------------------
The DAPS dataset is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
|
corvj/daps
|
[
"language:en",
"region:us"
] |
2023-05-03T06:57:39+00:00
|
{"language": ["en"], "pretty_name": "Device and Produced Speech Dataset"}
|
2023-05-03T11:52:18+00:00
|
64ccf46576a571e80ea50d9495083bce6d372246
|
# Dataset Card for "truthful_qa_mc_hu"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_hu
|
[
"region:us"
] |
2023-05-03T07:02:49+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 214226, "num_examples": 684}], "download_size": 115841, "dataset_size": 214226}}
|
2023-05-03T07:02:52+00:00
|
40dc2d270b207be2ff2aeb8c7173d0bd91b216d5
|
# Dataset Card for "truthful_qa_mc_fi"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_fi
|
[
"region:us"
] |
2023-05-03T07:05:45+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 207496, "num_examples": 684}], "download_size": 110922, "dataset_size": 207496}}
|
2023-05-03T07:05:47+00:00
|
2c99decbbea5427cf16316df8db33b6238bc058d
|
# Dataset Card for "truthful_qa_mc_tr"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc_tr
|
[
"region:us"
] |
2023-05-03T07:09:04+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 209883, "num_examples": 684}], "download_size": 109046, "dataset_size": 209883}}
|
2023-05-03T07:09:07+00:00
|
6699424a8bcbacb1755484fd0e4ecc9bc614525b
|
# Dataset Card for "batch_indexing_machine_224x224_images"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Circularmachines/batch_indexing_machine_224x224_images
|
[
"region:us"
] |
2023-05-03T07:11:02+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1200580554.73, "num_examples": 72510}], "download_size": 1200450555, "dataset_size": 1200580554.73}}
|
2023-05-03T07:11:57+00:00
|
ed40131b56ce86ce3666f2942953595dd9d29608
|
# LHM-Dienstleistungen-QA - german public domain question-answering dataset
Datasets created based on data from Munich city administration.
Format inspired by GermanQuAD.
## Annotated by:
- Institute for Applied Artificial Intelligence: Leon Marius Schröder
- BettercallPaul GmbH: Clemens Gutknecht, Oubada Alkiddeh, Susanne Weiß
- Stadt München: Leon Lukas
## Data basis
Texts were taken from the “Dienstleistungsfinder” of the City of Munich administration,
where information about services offered by the city is presented online.
The information ranges from applying for an ID card to disposing of garbage.
- https://stadt.muenchen.de/service/ (Date 11/2022)
## Dataset statistics
- Shortest Question: 13 Characters
- Average Question: 68 Characters
- Longest Question: 183 Characters
### Distribution of first sentence beginnings

### Distribution of first sentence beginnings: Wie

### Distribution of first sentence beginnings: Wo

### Distribution of first sentence beginnings: Was

## Models trained using this dataset
### QA
- cgutknecht/gelectra_large_gsqd-gq-LHM
### DPR
- schreon/xnext-lhm_queries_encoder
- schreon/xnext-lhm_passages_encoder
|
it-at-m/LHM-Dienstleistungen-QA
|
[
"task_categories:question-answering",
"size_categories:1K<n<10K",
"language:de",
"license:mit",
"QA",
"region:us"
] |
2023-05-03T07:35:39+00:00
|
{"language": ["de"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "pretty_name": "LHM Dienstleistungen: QA", "tags": ["QA"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "test", "num_bytes": 560403, "num_examples": 357}, {"name": "train", "num_bytes": 2826731, "num_examples": 1773}], "download_size": 710027, "dataset_size": 3387134}}
|
2024-01-23T12:30:01+00:00
|
f5a100cc621e8ca7dab18aeb39f046cbce33e21e
|
annakarl/barrier
|
[
"license:openrail",
"region:us"
] |
2023-05-03T07:57:42+00:00
|
{"license": "openrail"}
|
2023-05-03T07:57:42+00:00
|
|
c28a323b6647ae86dff464539dc139f1a0089090
|
## Misogynistic statements and their potential restructuring
Beta dataset
Generated by GPT3.5
Language: Spanish
|
glombardo/misogynistic-statements-and-their-potential-restructuring
|
[
"task_categories:text2text-generation",
"task_categories:text-classification",
"size_categories:n<1K",
"language:es",
"license:cc-by-nc-4.0",
"region:us"
] |
2023-05-03T08:00:22+00:00
|
{"language": ["es"], "license": "cc-by-nc-4.0", "size_categories": ["n<1K"], "task_categories": ["text2text-generation", "text-classification"], "pretty_name": "Misogynistic statements and their potential restructuring", "dataset_info": {"features": [{"name": "misogynistic", "dtype": "string"}, {"name": "reformulation", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 24000, "num_examples": 121}, {"name": "validation", "num_bytes": 8253, "num_examples": 41}, {"name": "test", "num_bytes": 8346, "num_examples": 41}], "download_size": 28877, "dataset_size": 40599}}
|
2023-05-28T16:56:43+00:00
|
0d3670ca62ec6ed44c317da96cdd185e3379f331
|
# Dataset Card for "phpbb"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
lponsard/phpbb
|
[
"region:us"
] |
2023-05-03T08:01:31+00:00
|
{"dataset_info": {"features": [{"name": "labels", "dtype": "int64"}, {"name": "post_subject", "dtype": "string"}, {"name": "post_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 81017.6, "num_examples": 116}, {"name": "test", "num_bytes": 20254.4, "num_examples": 29}], "download_size": 59163, "dataset_size": 101272.0}}
|
2023-05-04T13:40:02+00:00
|
cccd69e37509d8e98e5a89ba3e3828e162328333
|
# Dataset Card for "github-issues"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
RiccardoGvn/github-issues
|
[
"region:us"
] |
2023-05-03T08:38:02+00:00
|
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "assignees", "list": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "milestone", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "description", "dtype": 
"string"}, {"name": "creator", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "open_issues", "dtype": "int64"}, {"name": "closed_issues", "dtype": "int64"}, {"name": "state", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "due_on", "dtype": "null"}, {"name": "closed_at", "dtype": "null"}]}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "closed_at", "dtype": "timestamp[s]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "null"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 11542551, "num_examples": 2000}], "download_size": 3276181, "dataset_size": 11542551}}
|
2023-05-03T08:38:08+00:00
|
3dc086a8c15e25ab6740f34c4bdfad3ebb09f1b0
|
## Dataset Description
- **Repository:** [Link to repo](https://github.com/VityaVitalich/IMAD)
- **Paper:** [IMage Augmented multi-modal Dialogue: IMAD](https://arxiv.org/abs/2305.10512v1)
- **Point of Contact:** [Contacts Section](https://github.com/VityaVitalich/IMAD#contacts)
### Dataset Summary
This dataset contains data from the paper [IMage Augmented multi-modal Dialogue: IMAD](https://arxiv.org/abs/2305.10512v1).
The main feature of this dataset is the novelty of the task. It has been generated specifically for the purpose of image interpretation in a dialogue context.
Some of the dialogue utterances have been replaced with images, allowing a generative model to be trained to restore the initial utterance.
The dialogues are sourced from multiple dialogue datasets (DailyDialog, Commonsense, PersonaChat, MuTual, Empathetic Dialogues, Dream) and have been filtered using a technique described in the paper.
A significant portion of the data has been labeled by assessors, resulting in a high inter-reliability score. The combination of these methods has led to a well-filtered dataset and consequently a high BLEU score.
We hope that this dataset will be beneficial for the development of multi-modal deep learning.
### Data Fields
The dataset contains 5 fields (an illustrative record is shown after this list):
- `image_id`: `string` that contains id of image in the full Unsplash Dataset
- `source_data`: `string` that contains the name of source dataset
- `utter`: `string` that contains utterance that was replaced in this dialogue with an image
- `context`: `list` of `string` that contains sequence of utterances in the dialogue before the replaced utterance
- `image_like`: `int` that shows if the data was collected with assessors or via filtering technique
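For illustration only, a single record might look like the following; all values are made up and do not come from the dataset:
```python
# Hypothetical record, shown only to illustrate the field layout.
example = {
    "image_id": "AbC123xYz",   # made-up Unsplash-style id
    "source_data": "DailyDialog",
    "utter": "Sure, here is a photo of the beach we went to.",
    "context": [
        "How was your weekend?",
        "Great, we drove out to the coast on Saturday.",
    ],
    "image_like": 1,
}
```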
### Licensing Information
The textual part of IMAD is licensed under [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). The full dataset with images can be requested by contacting the authors directly,
or obtained by matching `image_id` values against the full Unsplash dataset.
### Contacts
Feel free to reach out to us at [[email protected]] for inquiries, collaboration suggestions, or data requests related to our work.
### Citation Information
To cite this article, please use this BibTeX reference:
```bibtex
@misc{viktor2023imad,
title={IMAD: IMage-Augmented multi-modal Dialogue},
author={Moskvoretskii Viktor and Frolov Anton and Kuznetsov Denis},
year={2023},
eprint={2305.10512},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
Or via MLA Citation
```
Viktor, Moskvoretskii et al. “IMAD: IMage-Augmented multi-modal Dialogue.” (2023).
```
|
VityaVitalich/IMAD
|
[
"task_categories:text-generation",
"task_categories:image-to-text",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"language:en",
"license:cc-by-nc-4.0",
"multi-modal",
"dialogue",
"arxiv:2305.10512",
"region:us"
] |
2023-05-03T08:49:38+00:00
|
{"language": ["en"], "license": "cc-by-nc-4.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "task_categories": ["text-generation", "image-to-text"], "pretty_name": "IMAD", "tags": ["multi-modal", "dialogue"]}
|
2023-05-29T11:24:29+00:00
|
ae2ab8ca09aaea58167016d8252c501d3fd9a64b
|
# Dataset Card for "cot_ranking"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
fiveflow/cot_ranking
|
[
"region:us"
] |
2023-05-03T09:04:34+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "response_j", "dtype": "string"}, {"name": "response_k", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 64266082, "num_examples": 67830}, {"name": "test", "num_bytes": 3323500, "num_examples": 3570}], "download_size": 408618, "dataset_size": 67589582}}
|
2023-05-03T09:05:32+00:00
|
8cb62ea6e5104e0152624b8bb4af920411e65b29
|
# PoseC3D-UCF101
Original data from [mmaction2](https://github.com/open-mmlab/mmaction2/blob/main/tools/data/skeleton/README.md).
|
kiyoonkim/ucf-101-posec3d
|
[
"region:us"
] |
2023-05-03T09:14:16+00:00
|
{}
|
2023-05-03T11:15:03+00:00
|
5d798b5a534a1bfe96b88aa62d275017a3cad454
|
Beta Dataset
Generated by GPT3.5
|
glombardo/misogynistic-statements-classification
|
[
"task_categories:text-classification",
"language:es",
"license:cc-by-nc-4.0",
"region:us"
] |
2023-05-03T10:02:48+00:00
|
{"language": ["es"], "license": "cc-by-nc-4.0", "task_categories": ["text-classification"], "pretty_name": "Misogynistic statements classification", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Non-sexist", "1": "Sexist"}}}}], "splits": [{"name": "train", "num_bytes": 13234, "num_examples": 127}, {"name": "validation", "num_bytes": 4221, "num_examples": 42}, {"name": "test", "num_bytes": 4438, "num_examples": 43}], "download_size": 16218, "dataset_size": 21893}}
|
2023-05-10T18:18:45+00:00
|
c72f67904759f6790fcf3db2e2e31a1dd672b886
|
exp386/Train_ItLit800
|
[
"license:mit",
"region:us"
] |
2023-05-03T10:15:24+00:00
|
{"license": "mit"}
|
2023-05-03T10:22:00+00:00
|
|
c53417490bb089fdb68ab5621a4f352d32ca611d
|
Some car photos from Forza Horizon 4/5 for the ENGG5104 final project's open challenge.
## Citation
If you use this dataset, please cite it as:
```
@misc{ILoveCornForzaHorizon,
author = {Yan MA},
title = {Forza horizon caption},
year={2023},
howpublished= {\url{https://huggingface.co/datasets/ILoveCorn/forza-horizon}}
}
```
|
ILoveCorn/forza-horizon
|
[
"license:afl-3.0",
"region:us"
] |
2023-05-03T10:27:10+00:00
|
{"license": "afl-3.0", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 77297720.0, "num_examples": 50}], "download_size": 77301663, "dataset_size": 77297720.0}}
|
2023-05-05T05:39:45+00:00
|
c8ee87634695ae44c5fa9bcdc5f1a5e7c5ae8e47
|
## Paper
You can find more information in our paper.
- **Dataset Paper:** <https://arxiv.org/abs/2307.04657>
|
PKU-Alignment/PKU-SafeRLHF-10K
|
[
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-nc-4.0",
"safe",
"safety",
"ai-safety",
"llm",
"lm",
"human-feedback",
"rlhf",
"safe-rlhf",
"arxiv:2307.04657",
"region:us"
] |
2023-05-03T10:28:45+00:00
|
{"language": ["en"], "license": "cc-by-nc-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "tags": ["safe", "safety", "ai-safety", "llm", "lm", "human-feedback", "rlhf", "safe-rlhf"]}
|
2023-07-20T15:29:15+00:00
|
31a004f876bd84812c052957cc4f43fcdf694e4d
|
test
|
andresad/products
|
[
"region:us"
] |
2023-05-03T11:02:19+00:00
|
{}
|
2023-05-05T20:46:55+00:00
|
632ced95db1e042e3487c9f8ea3ef4187f666299
|
# Dataset Card for "PAWS-X-maltese"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
amitness/PAWS-X-maltese
|
[
"region:us"
] |
2023-05-03T11:08:40+00:00
|
{"dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "not_entailment", "1": "entailment"}}}}, {"name": "sentence1_mt", "dtype": "string"}, {"name": "sentence2_mt", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 972852, "num_examples": 2000}, {"name": "train", "num_bytes": 23898021, "num_examples": 49175}, {"name": "validation", "num_bytes": 965498, "num_examples": 2000}], "download_size": 18059931, "dataset_size": 25836371}}
|
2023-05-03T11:09:04+00:00
|
e31a25a2fb9216b1b6aafc57729150619025ba60
|
Chinese/English question-answering data extracted from WikiHow pages.
Related project: [MNBVC](https://github.com/esbatmop/MNBVC)
Extraction tool code: [WikiHowQAExtractor](https://github.com/wanicca/WikiHowQAExtractor)
|
wanicca/WikiHowQA-mnbvc
|
[
"task_categories:question-answering",
"size_categories:10K<n<100K",
"language:en",
"language:zh",
"license:mit",
"region:us"
] |
2023-05-03T12:11:03+00:00
|
{"language": ["en", "zh"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"]}
|
2023-09-04T05:18:28+00:00
|
fd0689779e5eccde5cb2eec718424720010227e5
|
# Dataset Card for "hagrid-hand-enc-250k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
MakiPan/hagrid-hand-enc-250k
|
[
"region:us"
] |
2023-05-03T12:38:18+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 52477517886.216, "num_examples": 232009}], "download_size": 51210807309, "dataset_size": 52477517886.216}}
|
2023-05-03T13:03:13+00:00
|
c1dd97870ab363675ff0c3447799083105335f81
|
# Dataset Card for "mrlove"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ryrobotics/mrlove
|
[
"region:us"
] |
2023-05-03T12:50:06+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "train"}}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7613973.0, "num_examples": 23}], "download_size": 7615284, "dataset_size": 7613973.0}}
|
2023-05-03T13:24:46+00:00
|
e378f4c70f66413a00c664373f4f43064b3ba752
|
Mindgame dataset
Code:
https://github.com/sileod/llm-theory-of-mind
Article (Accepted at EMNLP 2023 Findings):
https://arxiv.org/abs/2305.03353
```
@article{sileo2023mindgames,
title={MindGames: Targeting Theory of Mind in Large Language Models with Dynamic Epistemic Modal Logic},
author={Sileo, Damien and Lernould, Antoine},
journal={arXiv preprint arXiv:2305.03353},
year={2023}
}
```
|
sileod/mindgames
|
[
"task_categories:text-classification",
"task_ids:natural-language-inference",
"task_ids:multi-input-text-classification",
"multilinguality:monolingual",
"language:en",
"license:apache-2.0",
"theory of mind",
"tom",
"Logical-Reasoning",
"Modal-Logic",
"Reasoning",
"Logics",
"Logic",
"nli",
"model-checking",
"natural language inference",
"arxiv:2305.03353",
"region:us"
] |
2023-05-03T12:56:10+00:00
|
{"language": ["en"], "license": "apache-2.0", "multilinguality": ["monolingual"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "multi-input-text-classification"], "tags": ["theory of mind", "tom", "Logical-Reasoning", "Modal-Logic", "Reasoning", "Logics", "Logic", "nli", "model-checking", "natural language inference"], "dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "smcdel_problem", "dtype": "string"}, {"name": "n_announcements", "dtype": "int64"}, {"name": "pbcheck", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "setup", "dtype": "string"}, {"name": "hypothesis_depth", "dtype": "int64"}, {"name": "n_agents", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "names", "sequence": "string"}, {"name": "index", "dtype": "int64"}, {"name": "s-l", "dtype": "string"}, {"name": "deberta_pred", "dtype": "int64"}, {"name": "deberta_confidence", "dtype": "float64"}, {"name": "difficulty", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 8702021, "num_examples": 11174}, {"name": "validation", "num_bytes": 2904084, "num_examples": 3725}, {"name": "test", "num_bytes": 2909341, "num_examples": 3725}], "download_size": 2989857, "dataset_size": 14515446}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}]}
|
2023-11-22T08:24:50+00:00
|
e3ff41c4c4067d21b99bb61d8f40ad1c66666775
|
# Dataset summary
This repository is dedicated to prompts used to perform in-context learning with [starcoder](https://huggingface.co/bigcode/starcoder). The model is an
autoregressive language model trained on both code and natural language text. It can be turned into an AI-powered technical assistant by prepending conversations to
its 8192-token context window.
# Format
The prompt is a .txt file which contains multiple conversations between a human and the assistant. Here is the format
```
-----
Human: <instruction>
Assistant: <answer>
-----
Human: <instruction>
Assistant: <answer>
Human: <instruction>
Assistant: <answer>
.
.
.
-----
```
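A minimal usage sketch follows; the prompt file name and the generation settings are assumptions rather than documented choices, so check the repository file listing for the actual file name.
```python
# Sketch: prepend the technical-assistant prompt to a user request and let
# StarCoder continue with the assistant's answer.
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer

prompt_path = hf_hub_download(
    repo_id="bigcode/ta-prompt",
    filename="TA_prompt_v1.txt",   # hypothetical file name
    repo_type="dataset",
)
with open(prompt_path) as f:
    ta_prompt = f.read()

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")
model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder", device_map="auto")

request = "Write a Python function that reverses a singly linked list."
full_input = f"{ta_prompt}\n-----\nHuman: {request}\n\nAssistant:"

inputs = tokenizer(full_input, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```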
# Use cases
We want the technical assistant to cover a diverse set of use cases
- **Code-to-text**:
- `What is the purpose of the following code?<code>`
- `What is the bug in the following code?<code>`
- **Text-to-code**:
- `Write/Design/Implement a function to <task>`
- **Code-to-code**:
- `Translate this <code> from <programming language> to <programming language>.`
- **Text-to-text**:
- `What is <technical concept>`
- **General-purpose Q&A**:
- `What are you?`
- `What is your purpose?`
# Scope of the work
Since the model is designed for coding tasks, users should not expect it to output relevant answers when prompted with general-purpose questions. For coding
requests, the model's output should be post-processed before being tested.
|
bigcode/ta-prompt
|
[
"language:code",
"license:apache-2.0",
"region:us"
] |
2023-05-03T13:04:39+00:00
|
{"language": ["code"], "license": "apache-2.0", "programming_language": ["Java", "JavaScript", "Python"]}
|
2023-05-04T11:20:22+00:00
|
8c5c0e6207e56c5b3968f4897f506c84230568f0
|
milashkaarshif/MoeGirlPedia_wikitext_raw_archive
|
[
"task_categories:text-generation",
"task_categories:text2text-generation",
"size_categories:1M<n<10M",
"language:zh",
"language:ja",
"language:en",
"license:cc-by-nc-sa-3.0",
"wiki",
"wikitext",
"anime",
"comic",
"game",
"archive",
"art",
"music",
"pedia",
"MGP",
"萌娘百科",
"萌百",
"百科",
"维基",
"region:us"
] |
2023-05-03T13:07:17+00:00
|
{"language": ["zh", "ja", "en"], "license": "cc-by-nc-sa-3.0", "size_categories": ["1M<n<10M"], "task_categories": ["text-generation", "text2text-generation"], "tags": ["wiki", "wikitext", "anime", "comic", "game", "archive", "art", "music", "pedia", "MGP", "\u840c\u5a18\u767e\u79d1", "\u840c\u767e", "\u767e\u79d1", "\u7ef4\u57fa"]}
|
2024-02-03T05:57:41+00:00
|
|
b3fb7b9178567fef467a60bf3e0f336c26bbeedd
|
# Dataset Card for "deepfashion_with_captions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
lirus18/deepfashion_with_captions
|
[
"region:us"
] |
2023-05-03T13:24:55+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "openpose", "dtype": "image"}, {"name": "cloth", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3491966577.847, "num_examples": 13679}], "download_size": 3402087710, "dataset_size": 3491966577.847}}
|
2023-05-03T13:26:49+00:00
|
5823640f100471aab9afd1ea6ec3b4b602f35697
|
# Dataset Card for "COME15K"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
RGBD-SOD/COME15K
|
[
"region:us"
] |
2023-05-03T13:41:51+00:00
|
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "rgb", "dtype": "image"}, {"name": "depth", "dtype": "image"}, {"name": "gt", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2280732875.25, "num_examples": 8025}, {"name": "validation", "num_bytes": 1256773656.2, "num_examples": 4600}, {"name": "test", "num_bytes": 788633364.0, "num_examples": 3000}], "download_size": 4343671184, "dataset_size": 4326139895.45}}
|
2023-05-07T10:21:37+00:00
|
1053f2518f3bb75e411495b92aa9e60426ccc996
|
This dataset contains phonetic transcriptions of audio recordings as well as English transcripts. The phonetic transcriptions are based on the g2p model. It can be used to train a phoneme recognition
model using wav2vec2.
|
mirfan899/phoneme_asr
|
[
"task_categories:automatic-speech-recognition",
"size_categories:1K<n<10K",
"language:en",
"license:bsd",
"region:us"
] |
2023-05-03T14:04:17+00:00
|
{"language": ["en"], "license": "bsd", "size_categories": ["1K<n<10K"], "task_categories": ["automatic-speech-recognition"], "pretty_name": "timit phoneme datas"}
|
2023-06-17T11:32:48+00:00
|
d92dbb7ed88aef222bcf10d32147cff1e7c7046e
|
# Dataset Card for "autocrit-testing-numbers"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reciprocate/number-pairs
|
[
"region:us"
] |
2023-05-03T14:16:41+00:00
|
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "selected", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 30931, "num_examples": 900}, {"name": "test", "num_bytes": 3436, "num_examples": 100}], "download_size": 5098, "dataset_size": 34367}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
|
2023-11-20T14:36:37+00:00
|
23c0254754161e157bec25ae605b5da6b56e6431
|
# Dataset Card for "autocrit-testing-imdb"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reciprocate/imdb-pairs
|
[
"region:us"
] |
2023-05-03T14:17:13+00:00
|
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "selected", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 62494332.6, "num_examples": 23750}, {"name": "test", "num_bytes": 3289175.4, "num_examples": 1250}], "download_size": 43001661, "dataset_size": 65783508.0}}
|
2023-05-03T14:31:08+00:00
|
bb81a8ce6dcce5d2a1cc4c70859172011dbada07
|
yang365/dataset
|
[
"license:mit",
"region:us"
] |
2023-05-03T14:23:37+00:00
|
{"license": "mit"}
|
2023-05-03T14:24:55+00:00
|
|
e585c4860e615550f9115a32e08570ef4fba024e
|
# Dataset Description: opus_instruction_format
This dataset is a translation dataset from opus-en-fr data, in the same format as the Stanford Alpaca dataset. The dataset contains a set of instructions for translation tasks, which include the following two reformulations:
- "Traduire la ou les phrases suivantes en anglais" (Translate the following sentence(s) into English)
- "Traduce the following sentences in english".
The dataset consists of input sentences in either English or French and their corresponding translations in French or English.
The dataset includes a variety of sentence types, ranging from simple to complex sentences, and covers a wide range of topics.
The dataset contains a total of 37,413 examples.
This dataset will be used for training and evaluating LLMs such as LLaMA- or GPT-type models. The goal is to improve their ability to answer in French.
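A record in this format might look like the following; the sentence pair is made up for illustration and only the field layout reflects the description above.
```python
# Illustrative record only; not an actual example from the dataset.
example = {
    "instruction": "Traduire la ou les phrases suivantes en anglais",
    "input": "Le chat dort sur le canapé.",
    "output": "The cat is sleeping on the couch.",
}
```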
|
LsTam/opus_instruction_format
|
[
"region:us"
] |
2023-05-03T15:48:17+00:00
|
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 30938408, "num_examples": 37413}], "download_size": 19612221, "dataset_size": 30938408}}
|
2023-05-04T07:39:26+00:00
|
2b1f82c913a7b59e41b9bb29f77b81047eec8095
|
# Dataset Card for "sanskrit-stemming-tagging-pali-long"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
chronbmm/sanskrit-stemming-tagging-pali-long
|
[
"region:us"
] |
2023-05-03T16:22:50+00:00
|
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "unsandhied", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 525248615, "num_examples": 1655728}, {"name": "validation", "num_bytes": 1858678, "num_examples": 3051}, {"name": "test", "num_bytes": 1924834, "num_examples": 3137}, {"name": "test_long_500", "num_bytes": 302454, "num_examples": 500}, {"name": "validation_long_500", "num_bytes": 311042, "num_examples": 500}], "download_size": 184746128, "dataset_size": 529645623}}
|
2023-05-18T15:51:33+00:00
|
fa21a185e8a78d5df4d689bbdacef40375d21128
|
# Dataset Card for "sanskrit-stemming-tagging-pali"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
chronbmm/sanskrit-stemming-tagging-pali
|
[
"region:us"
] |
2023-05-03T16:23:22+00:00
|
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "unsandhied", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 261958158, "num_examples": 1228944}, {"name": "validation", "num_bytes": 1857831, "num_examples": 9213}, {"name": "test", "num_bytes": 1919103, "num_examples": 8763}, {"name": "test_500", "num_bytes": 93787, "num_examples": 500}, {"name": "validation_500", "num_bytes": 104560, "num_examples": 500}], "download_size": 95187903, "dataset_size": 265933439}}
|
2023-05-18T15:51:46+00:00
|
9a7a4ac54627b5e163a845b2778a783fa02a474e
|
Products from axum.
|
andresad/articles
|
[
"region:us"
] |
2023-05-03T16:40:20+00:00
|
{}
|
2023-05-03T16:42:13+00:00
|
e801fa5a063133f026de15cc77848afec5397156
|
# Dataset Card for "wikipedia20220301en-bookcorpusopen-chunked-shuffled"
```
num_examples: 33.5 million
download_size: 15.3 GB
dataset_size: 26.1 GB
```
This dataset combines [wikipedia20220301.en](https://huggingface.co/datasets/wikipedia) and [bookcorpusopen](https://huggingface.co/datasets/bookcorpusopen),
and splits the data into smaller chunks, of size ~820 chars
(such that each item will be at least ~128 tokens for the average tokenizer).
The order of the items in this dataset has been shuffled,
meaning you don't have to use `dataset.shuffle`,
which is slower to iterate over.
The logic only splits on spaces, so the chunks are likely to be slightly larger than 820 chars.
The dataset has been normalized into lower case, with accents and non-English characters removed.
Items with less than 200 chars or more than 1000 chars have been removed.
This dataset is processed for convenience, at the expense of losing some percentage of the tokens due to truncation
(assuming the training minibatches are truncated to 128 tokens).
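A rough sketch of the kind of preprocessing described above (illustrative only, not the exact code used to build the dataset):
```python
# Illustrative preprocessing: lower-case, strip accents/non-ASCII, chunk on
# spaces to ~820 chars, and drop chunks outside the 200-1000 char range.
import unicodedata

def normalize(text: str) -> str:
    text = unicodedata.normalize("NFKD", text.lower())
    return text.encode("ascii", "ignore").decode("ascii")

def chunk(text: str, target_chars: int = 820):
    current, size = [], 0
    for word in text.split(" "):
        current.append(word)
        size += len(word) + 1
        if size >= target_chars:
            yield " ".join(current)
            current, size = [], 0
    if current:
        yield " ".join(current)

raw_document = "Some long article text goes here ... " * 200   # placeholder input
chunks = [c for c in chunk(normalize(raw_document)) if 200 <= len(c) <= 1000]
print(len(chunks), chunks[0][:80])
```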
|
sradc/chunked-shuffled-wikipedia20220301en-bookcorpusopen
|
[
"language:en",
"region:us"
] |
2023-05-03T16:40:58+00:00
|
{"language": "en", "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26076989556, "num_examples": 33536113}], "download_size": 17380043798, "dataset_size": 26076989556}}
|
2023-07-17T19:33:04+00:00
|
26557acb2c0535987853422c89ea7fb6def50f2d
|
# Modified Victorian Era Authorship Attribution Dataset
## About
This data set is a modified version of the one that can be found [here](https://archive.ics.uci.edu/ml/datasets/Victorian+Era+Authorship+Attribution).
The difference is that the training dataset was split into two parts: 80% training and 20% testing, with labels kept for the test portion.
Splitting was done with a random stratified sample approach.
This is different than the source dataset which did not have any labels for the testing data.
Additionally, all text has been converted to UTF-8 format and any errors were ignored.
The original testing data is not included with this release.
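An 80/20 stratified split of that kind can be reproduced along these lines; the file and column names below are made up, and this is not the authors' code:
```python
# Sketch of a random stratified 80/20 split by author label.
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv("victorian_train.csv")   # hypothetical source file
train_df, test_df = train_test_split(
    df,
    test_size=0.20,
    stratify=df["author"],                # hypothetical label column
    random_state=42,
)
print(len(train_df), len(test_df))
```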
## Citation
> GUNGOR, ABDULMECIT, Benchmarking Authorship Attribution Techniques Using Over A Thousand Books by Fifty Victorian Era Novelists, Purdue Master of Thesis, 2018-04
|
NicholasSynovic/Modified-VEAA
|
[
"task_categories:text-classification",
"size_categories:10K<n<100K",
"language:en",
"license:agpl-3.0",
"region:us"
] |
2023-05-03T16:47:32+00:00
|
{"language": ["en"], "license": "agpl-3.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"]}
|
2023-05-03T17:04:48+00:00
|
f441ef4626fbbdc9d6a0b93ca27cae3f543c1b46
|
# Dataset Card for "face-celeb-vietnamese"
## Dataset Summary
This dataset contains information on over 8,000 samples of well-known Vietnamese individuals, categorized into three professions: singers, actors, and beauty queens. The dataset includes data on more than 100 celebrities in each of the three job categories.
## Languages
- Vietnamese: The label is used to indicate the name of celebrities in Vietnamese.
## Dataset Structure
- Each sample pairs an image with a Vietnamese text label giving the celebrity's name.
## Source Data - Initial Data Collection and Normalization
[Website người nổi tiếng](https://nguoinoitieng.tv)
### Licensing Information
Apache License 2.0
### Contributions
Thanks to [@github-duongttr](https://github.com/duongttr) and [@github-pphuc25](https://github.com/pphuc25) for adding this dataset.
|
fptudsc/face-celeb-vietnamese
|
[
"task_categories:image-classification",
"task_categories:zero-shot-classification",
"size_categories:10M<n<100M",
"language:vi",
"license:apache-2.0",
"region:us"
] |
2023-05-03T16:56:54+00:00
|
{"language": ["vi"], "license": "apache-2.0", "size_categories": ["10M<n<100M"], "task_categories": ["image-classification", "zero-shot-classification"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 82233752.864, "num_examples": 8557}], "download_size": 80630170, "dataset_size": 82233752.864}}
|
2023-05-10T14:13:18+00:00
|
3b1335c99f454d33a967f7bf80a5a4667db0656c
|
# Dataset Card for "iemocap_audio_text_splitted"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Ar4ikov/iemocap_audio_text_splitted
|
[
"region:us"
] |
2023-05-03T17:08:58+00:00
|
{"dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "activation", "dtype": "float64"}, {"name": "dominance", "dtype": "float64"}, {"name": "emotion", "dtype": "string"}, {"name": "end_time", "dtype": "float64"}, {"name": "start_time", "dtype": "float64"}, {"name": "titre", "dtype": "string"}, {"name": "to_translate", "dtype": "string"}, {"name": "translated", "dtype": "string"}, {"name": "valence", "dtype": "float64"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 1148478491.1463113, "num_examples": 8031}, {"name": "test", "num_bytes": 287155695.4826887, "num_examples": 2008}], "download_size": 1409847521, "dataset_size": 1435634186.629}}
|
2023-05-03T17:36:01+00:00
|
3fc456fe3ee11adc0e604cdf2f920192f49530f4
|
HugAda/ada-sokol-style
|
[
"license:afl-3.0",
"region:us"
] |
2023-05-03T17:42:43+00:00
|
{"license": "afl-3.0"}
|
2023-05-03T17:42:43+00:00
|
|
df082443773f2db01e165577c5d94c5dafe90f12
|
# Dataset Card for Dataset Name
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
Dataset used to train a language model to classify text into 50 different waste classes.
### Languages
English
## Dataset Structure
### Data Instances
Phrase | Class | Index
-------|-------|-------
"I have this apple phone charger to throw, where should I put it ?" | PHONE CHARGER | 26
"Should I recycle a disposable cup ?" | Plastic Cup | 32
"I have a milk brick" | Tetrapack | 45
### Data Fields
- Phrase
- Class
- Class_index
### Data Splits
train: 12.5K rows
test: 5.38K rows
additional data: 7.24K rows (unseen_phrases.csv)
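A minimal loading sketch (assuming the train/test splits are exposed under those names; check the repository for the exact file layout):
```python
# Sketch: load the splits and inspect one labelled phrase.
from datasets import load_dataset

ds = load_dataset("thomasavare/waste-classification-v2")
print(ds)                    # expected: ~12.5K train rows and ~5.38K test rows
print(ds["train"][0])        # fields: Phrase, Class, Class_index
```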
## Dataset Creation
Manually, using objects and phrase templates.
### Annotations
#### Annotation process
Each object was annotated first, and each phrase was then annotated according to the annotation of the object it refers to.
#### Who are the annotators?
Myself
### Personal and Sensitive Information
None
## Considerations for Using the Data
### Social Impact of Dataset
None
### Discussion of Biases
Some classes are more present than others, but the dataset is balanced overall. Because it was created using patterns, it might not be very robust.
### Other Known Limitations
Phrase patterns are repeated; model performance should be verified on external phrases to confirm robustness.
|
thomasavare/waste-classification-v2
|
[
"size_categories:10K<n<100K",
"language:en",
"region:us"
] |
2023-05-03T18:09:04+00:00
|
{"language": ["en"], "size_categories": ["10K<n<100K"]}
|
2023-05-23T13:30:38+00:00
|
88fdaef33e415f5c5a888b61a993699ee4812318
|
# Dataset Card for "analisis-sentimeinto-textos-turisitcos-mx-review-corpus"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
alexcom/analisis-sentimeinto-textos-turisitcos-mx-review-corpus
|
[
"region:us"
] |
2023-05-03T18:13:12+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 113848875, "num_examples": 315442}], "download_size": 70253485, "dataset_size": 113848875}}
|
2023-05-03T18:14:04+00:00
|
702b7a45c698596b546f9f6193f55d74ea2aac68
|
# Dataset Card for "simplewiki-2020-11-01-embeddings"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
LukeSajkowski/simplewiki-2020-11-01-embeddings
|
[
"region:us"
] |
2023-05-03T18:17:22+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "embeddings", "sequence": "float32"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1718068276, "num_examples": 509663}], "download_size": 1981266913, "dataset_size": 1718068276}}
|
2023-05-18T18:23:39+00:00
|
116ffd846e8a3a00adb243c8792dafb06abeb204
|
Babak-Behkamkia/GPT-3_stance
|
[
"license:mit",
"region:us"
] |
2023-05-03T18:25:00+00:00
|
{"license": "mit"}
|
2023-05-03T18:25:00+00:00
|
|
002d6b60737e1ea4c0fe635201c0450e67a12914
|
ThanHitt/MasuSalmonID
|
[
"license:unknown",
"region:us"
] |
2023-05-03T18:26:10+00:00
|
{"license": "unknown"}
|
2023-05-03T18:26:10+00:00
|
|
e0abd24b1797795ca97611062cc01f0a5ae033d9
|
# Dataset Card for "truthful_qa_mc"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reaganjlee/truthful_qa_mc
|
[
"region:us"
] |
2023-05-03T18:45:31+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}], "splits": [{"name": "train", "num_bytes": 97337.0, "num_examples": 342}, {"name": "validation", "num_bytes": 97337.0, "num_examples": 342}], "download_size": 103581, "dataset_size": 194674.0}}
|
2023-05-03T18:45:36+00:00
|
88ac921973426c2f2cbbe6861d26fd82556c4953
|
# Dataset Card for "TVCG_Papers"
* Dataset Description
Repository:
Paper:
Leaderboard:
Point of Contact:
* Dataset Summary: This dataset contains 5,178 papers from IEEE TVCG. It includes multiple raw attributes of each paper, including both metadata and abstracts.
* Dataset Structure: JSONL file; each paper instance is a JSON object (a short loading sketch is given at the end of this card).
* Data Fields:
```
FEATURE = Features({
'data': Features({
'issue': Features({
'id': Value(dtype='string'),
'title': Value(dtype='string'),
'year': Value(dtype='string'),
'issueNum': Value(dtype='string'),
'idPrefix': Value(dtype='string'),
'pubType': Value(dtype='string'),
'volume': Value(dtype='string'),
'label': Value(dtype='string'),
'downloadables': Features({
'hasCover': Value(dtype='bool'),
'__typename': Value(dtype='string')
}),
'__typename': Value(dtype='string')
}),
'article': Features({
'id': Value(dtype='string'),
'doi': Value(dtype='string'),
'abstract': Value(dtype='string'),
'abstracts': [
{
'abstractType': Value(dtype='string'),
'content': Value(dtype='string'),
'__typename': Value(dtype='string')
}
],
'normalizedAbstract': Value(dtype='string'),
'title': Value(dtype='string'),
'normalizedTitle': Value(dtype='string'),
'fno': Value(dtype='string'),
'hasPdf': Value(dtype='bool'),
'idPrefix': Value(dtype='string'),
'keywords': [
Value(dtype='string')
],
'authors': [
{
'givenName': Value(dtype='string'),
'surname': Value(dtype='string'),
'fullName': Value(dtype='string'),
'affiliation': Value(dtype='string'),
'__typename': Value(dtype='string')
}
],
'replicability': Features({
'isEnabled': Value(dtype='bool'),
'codeDownloadUrl': Value(dtype='string'),
'codeRepositoryUrl': Value(dtype='string'),
'__typename': Value(dtype='string')
}),
'showBuyMe': Value(dtype='bool'),
'showRecommendedArticles': Value(dtype='bool'),
'isOpenAccess': Value(dtype='bool'),
'issueNum':Value(dtype='string'),
'pubDate': Value(dtype='string'),
'pubType': Value(dtype='string'),
'pages': Value(dtype='string'),
'year': Value(dtype='string'),
'issn': Value(dtype='string'),
'isbn': Value(dtype='string'),
'notes': Value(dtype='string'),
'notesType': Value(dtype='string'),
'__typename': Value(dtype='string'),
}),
'recommendedArticles': [
{
'id': Value(dtype='string'),
'title': Value(dtype='string'),
'doi': Value(dtype='string'),
'abstractUrl': Value(dtype='string'),
'parentPublication':
{
'id': Value(dtype='string'),
'title': Value(dtype='string'),
'__typename': Value(dtype='string')
},
'__typename': Value(dtype='string')
},
],
'adjacentArticles': Features({
'previous': {
'fno': Value(dtype='string'),
'articleId': Value(dtype='string'),
'__typename': Value(dtype='string')},
'next': {
'fno': Value(dtype='string'),
'articleId': Value(dtype='string'),
'__typename': Value(dtype='string')
},
'__typename': Value(dtype='string')
}),
'webExtras': [
Features({
'id': Value(dtype='string'),
'name': Value(dtype='string'),
'location': Value(dtype='string'),
'extension': Value(dtype='string'),
'size': Value(dtype='string'),
'__typename': Value(dtype='string')
})
],
'articleVideos': [Value(dtype='string')]
})
})
```
* Source Data: https://www.computer.org/csdl/journal/tg
* Citation Information: Yamei Tu ([email protected])
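A minimal loading sketch for the structure described above (the split name is an assumption; check the repository for the actual file layout):
```python
# Sketch: load the papers and read one title/abstract.
from datasets import load_dataset

papers = load_dataset("Yamei/TVCG_Papers", split="train")
record = papers[0]["data"]
print(record["article"]["title"])
print(record["article"]["abstract"][:200])
```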
|
Yamei/TVCG_Papers
|
[
"size_categories:1K<n<10K",
"visualization ",
"papers",
"academic",
"text",
"region:us"
] |
2023-05-03T18:52:54+00:00
|
{"size_categories": ["1K<n<10K"], "pretty_name": "TVCG Papers ", "dataset_info": {"features": [{"name": "data", "struct": [{"name": "issue", "struct": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "issueNum", "dtype": "string"}, {"name": "idPrefix", "dtype": "string"}, {"name": "pubType", "dtype": "string"}, {"name": "volume", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "downloadables", "struct": [{"name": "hasCover", "dtype": "bool"}, {"name": "__typename", "dtype": "string"}]}, {"name": "__typename", "dtype": "string"}]}, {"name": "article", "struct": [{"name": "id", "dtype": "string"}, {"name": "doi", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "abstracts", "list": [{"name": "abstractType", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "normalizedAbstract", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "normalizedTitle", "dtype": "string"}, {"name": "fno", "dtype": "string"}, {"name": "hasPdf", "dtype": "bool"}, {"name": "idPrefix", "dtype": "string"}, {"name": "keywords", "list": "string"}, {"name": "authors", "list": [{"name": "givenName", "dtype": "string"}, {"name": "surname", "dtype": "string"}, {"name": "fullName", "dtype": "string"}, {"name": "affiliation", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "replicability", "struct": [{"name": "isEnabled", "dtype": "bool"}, {"name": "codeDownloadUrl", "dtype": "string"}, {"name": "codeRepositoryUrl", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "showBuyMe", "dtype": "bool"}, {"name": "showRecommendedArticles", "dtype": "bool"}, {"name": "isOpenAccess", "dtype": "bool"}, {"name": "issueNum", "dtype": "string"}, {"name": "pubDate", "dtype": "string"}, {"name": "pubType", "dtype": "string"}, {"name": "pages", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "issn", "dtype": "string"}, {"name": "isbn", "dtype": "string"}, {"name": "notes", "dtype": "string"}, {"name": "notesType", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "recommendedArticles", "list": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "doi", "dtype": "string"}, {"name": "abstractUrl", "dtype": "string"}, {"name": "parentPublication", "struct": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "__typename", "dtype": "string"}]}, {"name": "adjacentArticles", "struct": [{"name": "previous", "struct": [{"name": "fno", "dtype": "string"}, {"name": "articleId", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "next", "struct": [{"name": "fno", "dtype": "string"}, {"name": "articleId", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "__typename", "dtype": "string"}]}, {"name": "webExtras", "list": [{"name": "id", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "extension", "dtype": "string"}, {"name": "size", "dtype": "string"}, {"name": "__typename", "dtype": "string"}]}, {"name": "articleVideos", "list": "string"}]}], "splits": [{"name": "train", "num_bytes": 39196837, "num_examples": 5178}], "download_size": 15953795, "dataset_size": 39196837}, "tags": ["visualization ", "papers", "academic", "text"]}
|
2023-05-05T19:31:33+00:00
|
a51e4a7a81bcb377e74c6fb8f5833ced0844996b
|
alejandrowallace/tmdb-5000
|
[
"task_categories:zero-shot-classification",
"size_categories:1K<n<10K",
"language:en",
"license:unknown",
"region:us"
] |
2023-05-03T19:17:25+00:00
|
{"language": ["en"], "license": "unknown", "size_categories": ["1K<n<10K"], "task_categories": ["zero-shot-classification"]}
|
2023-05-03T19:19:43+00:00
|