| column | dtype | min length | max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 0 | 13.4M |
| id | string | 2 | 117 |
| tags | list | – | – |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 31.7M |
| last_modified | string | 25 | 25 |
7ea0326fa54d2f5b086569448d4c9957ec6a5c17
xiaoqia/PatternS
[ "license:afl-3.0", "region:us" ]
2023-06-06T02:16:34+00:00
{"license": "afl-3.0"}
2023-06-06T02:45:56+00:00
c3d4f86b18e0fc3292f2257571a2dd60522bd5b6
# Dataset Card for "llm-pdf-chunks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
prognosis/llm-pdf-chunks
[ "region:us" ]
2023-06-06T02:17:49+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 16924740.0, "num_examples": 2065}, {"name": "test", "num_bytes": 1885080.0, "num_examples": 230}], "download_size": 9101680, "dataset_size": 18809820.0}}
2023-06-06T02:18:07+00:00
b8d42090417668e991e48afa07dc2d8fd8481dc0
Grooose/Webuisetting
[ "region:us" ]
2023-06-06T02:20:44+00:00
{}
2023-06-06T06:06:58+00:00
c18762981c49f60814464b0f600e089b23cfb3fd
xiaoqia/p
[ "license:afl-3.0", "region:us" ]
2023-06-06T02:48:04+00:00
{"license": "afl-3.0"}
2023-06-13T09:39:53+00:00
563baa9113218e981ac81da39c389455bdd19ecd
# Dataset Card for "ag_news_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xwjzds/ag_news_test
[ "region:us" ]
2023-06-06T03:00:36+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "World", "1": "Sports", "2": "Business", "3": "Sci/Tech"}}}}], "splits": [{"name": "train", "num_bytes": 14837.952631578948, "num_examples": 60}, {"name": "test", "num_bytes": 3956.7873684210526, "num_examples": 16}], "download_size": 19494, "dataset_size": 18794.74}}
2023-06-07T16:01:09+00:00
ce418bdce65e9f9b07a5541acea0d4fd1a54819f
# Dataset Card for "us-congress" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
c4lliope/us-congress
[ "region:us" ]
2023-06-06T03:05:08+00:00
{"dataset_info": {"features": [{"name": "key", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "summaries", "struct": [{"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}, {"name": "summaries", "list": [{"name": "actionDate", "dtype": "string"}, {"name": "actionDesc", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "updateDate", "dtype": "string"}, {"name": "versionCode", "dtype": "string"}]}]}, {"name": "plaintext", "dtype": "string"}, {"name": "sponsor", "dtype": "string"}, {"name": "actions", "struct": [{"name": "actions", "list": [{"name": "actionCode", "dtype": "string"}, {"name": "actionDate", "dtype": "string"}, {"name": "actionTime", "dtype": "string"}, {"name": "calendarNumber", "struct": [{"name": "calendar", "dtype": "string"}, {"name": "number", "dtype": "string"}]}, {"name": "committees", "list": [{"name": "name", "dtype": "string"}, {"name": "systemCode", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "recordedVotes", "list": [{"name": "chamber", "dtype": "string"}, {"name": "congress", "dtype": "int64"}, {"name": "date", "dtype": "string"}, {"name": "rollNumber", "dtype": "int64"}, {"name": "sessionNumber", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "sourceSystem", "struct": [{"name": "code", "dtype": "int64"}, {"name": "name", "dtype": "string"}]}, {"name": "text", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "amendments", "struct": [{"name": "amendments", "list": [{"name": "congress", "dtype": "int64"}, {"name": "description", "dtype": "string"}, {"name": "latestAction", "struct": [{"name": "actionDate", "dtype": "string"}, {"name": "actionTime", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "number", "dtype": "string"}, {"name": "purpose", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "updateDate", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "committees", "struct": [{"name": "committees", "list": [{"name": "activities", "list": [{"name": "date", "dtype": "string"}, {"name": "name", "dtype": "string"}]}, {"name": "chamber", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "subcommittees", "list": [{"name": "activities", "list": [{"name": "date", "dtype": "string"}, {"name": "name", "dtype": "string"}]}, {"name": "name", "dtype": "string"}, {"name": "systemCode", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "systemCode", "dtype": "string"}, {"name": "type", "dtype": "string"}, 
{"name": "url", "dtype": "string"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "cosponsors", "struct": [{"name": "cosponsors", "list": [{"name": "bioguideId", "dtype": "string"}, {"name": "district", "dtype": "int64"}, {"name": "firstName", "dtype": "string"}, {"name": "fullName", "dtype": "string"}, {"name": "isOriginalCosponsor", "dtype": "bool"}, {"name": "lastName", "dtype": "string"}, {"name": "middleName", "dtype": "string"}, {"name": "party", "dtype": "string"}, {"name": "sponsorshipDate", "dtype": "string"}, {"name": "sponsorshipWithdrawnDate", "dtype": "string"}, {"name": "state", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}, {"name": "countIncludingWithdrawnCosponsors", "dtype": "int64"}, {"name": "prev", "dtype": "string"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "index", "struct": [{"name": "bill", "struct": [{"name": "actions", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "amendments", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "cboCostEstimates", "list": [{"name": "description", "dtype": "string"}, {"name": "pubDate", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "committeeReports", "list": [{"name": "citation", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "committees", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "congress", "dtype": "int64"}, {"name": "constitutionalAuthorityStatementText", "dtype": "string"}, {"name": "cosponsors", "struct": [{"name": "count", "dtype": "int64"}, {"name": "countIncludingWithdrawnCosponsors", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "introducedDate", "dtype": "string"}, {"name": "latestAction", "struct": [{"name": "actionDate", "dtype": "string"}, {"name": "actionTime", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "laws", "list": [{"name": "number", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "number", "dtype": "string"}, {"name": "originChamber", "dtype": "string"}, {"name": "policyArea", "struct": [{"name": "name", "dtype": "string"}]}, {"name": "relatedBills", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "sponsors", "list": [{"name": "bioguideId", "dtype": "string"}, {"name": "district", "dtype": "int64"}, {"name": "firstName", "dtype": "string"}, {"name": "fullName", "dtype": "string"}, {"name": "isByRequest", "dtype": "string"}, {"name": "lastName", "dtype": "string"}, {"name": "middleName", "dtype": "string"}, {"name": "party", "dtype": "string"}, {"name": "state", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "subjects", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "summaries", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", 
"dtype": "string"}]}, {"name": "textVersions", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "title", "dtype": "string"}, {"name": "titles", "struct": [{"name": "count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "type", "dtype": "string"}, {"name": "updateDate", "dtype": "string"}, {"name": "updateDateIncludingText", "dtype": "string"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "relatedbills", "struct": [{"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "relatedBills", "list": [{"name": "congress", "dtype": "int64"}, {"name": "latestAction", "struct": [{"name": "actionDate", "dtype": "string"}, {"name": "actionTime", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "number", "dtype": "int64"}, {"name": "relationshipDetails", "list": [{"name": "identifiedBy", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "title", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}]}, {"name": "subjects", "struct": [{"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}, {"name": "subjects", "struct": [{"name": "legislativeSubjects", "list": [{"name": "name", "dtype": "string"}]}, {"name": "policyArea", "struct": [{"name": "name", "dtype": "string"}]}]}]}, {"name": "text", "struct": [{"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}, {"name": "textVersions", "list": [{"name": "date", "dtype": "string"}, {"name": "formats", "list": [{"name": "type", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "type", "dtype": "string"}]}]}, {"name": "titles", "struct": [{"name": "pagination", "struct": [{"name": "count", "dtype": "int64"}]}, {"name": "request", "struct": [{"name": "billNumber", "dtype": "string"}, {"name": "billType", "dtype": "string"}, {"name": "billUrl", "dtype": "string"}, {"name": "congress", "dtype": "string"}, {"name": "contentType", "dtype": "string"}, {"name": "format", "dtype": "string"}]}, {"name": "titles", "list": [{"name": "billTextVersionCode", "dtype": "string"}, {"name": "billTextVersionName", "dtype": "string"}, {"name": "chamberCode", "dtype": "string"}, {"name": "chamberName", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "titleType", "dtype": "string"}]}]}], "splits": [{"name": "train", "num_bytes": 42798980, "num_examples": 6433}], "download_size": 6439766, "dataset_size": 42798980}}
2023-06-06T03:05:15+00:00
582127081ec2e6b608f3a577201357e70ff6ceec
# Dataset Card for "Stellarx-4b-swe1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dampish/Stellarx-4b-swe1
[ "region:us" ]
2023-06-06T03:17:57+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 62764238, "num_examples": 11117}], "download_size": 9078052, "dataset_size": 62764238}}
2023-06-06T03:18:01+00:00
53b79a50cf4fb45e924083594eab4d0de6976ec3
# Dataset Card for "unsplash_5k_blur_61KS" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
wtcherr/unsplash_5k_blur_61KS
[ "region:us" ]
2023-06-06T03:20:36+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "guide", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1477969828.0, "num_examples": 5000}], "download_size": 1477858975, "dataset_size": 1477969828.0}}
2023-06-06T13:09:02+00:00
490575ec2701a3812fd53ddecc9f56631907c6f3
BNNT/mozi_IP_instructions
[ "license:apache-2.0", "region:us" ]
2023-06-06T04:08:23+00:00
{"license": "apache-2.0"}
2023-08-23T05:28:40+00:00
8cd6415ce8fc5c663689f7358666ffe2f0bf3dcf
# Dataset Card for "Sample_vqa_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Sample_vqa_test
[ "region:us" ]
2023-06-06T04:14:23+00:00
{"dataset_info": {"features": [{"name": "question_type", "dtype": "string"}, {"name": "multiple_choice_answer", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "answers_original", "list": [{"name": "answer", "dtype": "string"}, {"name": "answer_confidence", "dtype": "string"}, {"name": "answer_id", "dtype": "int64"}]}, {"name": "id_image", "dtype": "int64"}, {"name": "answer_type", "dtype": "string"}, {"name": "question_id", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "DETA_detections_deta_swin_large_o365_coco_classes", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float32"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float32"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "DETA_detections_deta_swin_large_o365_coco_classes_ViT_L_14", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "DETA_detections_deta_swin_large_o365_clip_ViT_L_14", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "DETA_detections_deta_swin_large_o365_clip_ViT_L_14_blip_caption", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "caption", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "new_info_captions3", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "caption", "dtype": "string"}, {"name": "captions_module", "sequence": {"sequence": "string"}}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "DETA_detections_deta_swin_large_o365_clip_ViT_L_14_blip_caption_caption_module", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "caption", "dtype": "string"}, {"name": "captions_module", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "DETA_detections_deta_swin_large_o365_clip_ViT_L_14_blip_caption_caption_module_without_filtering", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "caption", "dtype": "string"}, {"name": "captions_module", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": 
"string"}, {"name": "tag", "dtype": "string"}]}, {"name": "clip_tags_LAION_ViT_H_14_2B", "sequence": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_LAION-ViT-H-14-2B", "sequence": "string"}, {"name": "DETA_detections_deta_swin_large_o365_clip_ViT_L_14_blip_caption_caption_module_random", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "caption", "dtype": "string"}, {"name": "captions_module", "sequence": "string"}, {"name": "captions_module_filter", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "Attributes_ViT_L_14_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_with_openai", "sequence": "string"}, {"name": "blip_caption_beam_5_Salesforce_blip2_flan_t5_xxl", "dtype": "string"}, {"name": "DETA_detections_deta_swin_large_o365_coco_classes_caption_all_patches_Salesforce_blip_image_captioning_large_", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "captions_all_patches", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "DETA_detections_deta_swin_large_o365_coco_classes_caption_all_patches_Salesforce_blip_image_captioning_large_clean", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "captions_all_patches", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}, {"name": "blip_caption_topk_50_Salesforce_blip_image_captioning_base_multiple", "sequence": "string"}, {"name": "DETA_detections_deta_swin_large_o365_clip_caption_all_patches_Salesforce_blip_image_captioning_large__ViT_L_14", "list": [{"name": "attribute", "dtype": "string"}, {"name": "box", "sequence": "float64"}, {"name": "captions_all_patches", "sequence": "string"}, {"name": "label", "dtype": "string"}, {"name": "location", "dtype": "string"}, {"name": "ratio", "dtype": "float64"}, {"name": "size", "dtype": "string"}, {"name": "tag", "dtype": "string"}]}], "splits": [{"name": "test", "num_bytes": 6612715.0, "num_examples": 10}], "download_size": 3783319, "dataset_size": 6612715.0}}
2023-06-06T04:14:29+00:00
4582bc08659732c9d4f53f528e45181e8bd04948
test
Coaso/yokote_test
[ "task_categories:table-question-answering", "size_categories:n<1K", "language:ja", "license:apache-2.0", "region:us" ]
2023-06-06T04:45:27+00:00
{"language": ["ja"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["table-question-answering"]}
2023-06-06T07:09:30+00:00
df1aa1d2687f42f0a39b865eff6b2445d57d8f66
xiemoxiaoshaso/sdwebui
[ "license:openrail", "region:us" ]
2023-06-06T05:17:21+00:00
{"license": "openrail"}
2023-06-25T05:30:32+00:00
99d3669da79e4d4d1cae807ad277f6ade1edbe0a
# Dataset Card for "default_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
will33am/default_test
[ "region:us" ]
2023-06-06T05:26:21+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "sequence": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 632261.0, "num_examples": 10}], "download_size": 610894, "dataset_size": 632261.0}}
2023-06-06T05:26:28+00:00
08498d0cc858052cb2ecb3e58ff7ad22d8e040f2
LolorzoloL/forFalcon
[ "license:apache-2.0", "region:us" ]
2023-06-06T05:34:29+00:00
{"license": "apache-2.0"}
2023-06-06T05:40:33+00:00
cc1f431579fcdd322867d11924f33956a55d5590
# Dataset Card for "contracts-classification-instruction-llm-experiments" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
scholarly360/contracts-classification-instruction-llm-experiments
[ "region:us" ]
2023-06-06T05:53:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "instances", "list": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}]}, {"name": "is_classification", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 2227831, "num_examples": 6052}, {"name": "test", "num_bytes": 949692, "num_examples": 2600}], "download_size": 1491275, "dataset_size": 3177523}}
2023-06-06T05:53:13+00:00
f02eb0766abda584f148da92391d62de0a4f62f5
ChristophSchuhmann/Chess-Selfplay
[ "license:apache-2.0", "region:us" ]
2023-06-06T06:02:40+00:00
{"license": "apache-2.0"}
2023-06-26T14:56:10+00:00
8c872e27c87b939a8c1d5bd61e5810e75b7c8bb9
# Dataset Card for "dummy-dataset-sayak" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
coding-assistant-custom/dummy-dataset-sayak
[ "region:us" ]
2023-06-06T06:22:41+00:00
{"dataset_info": {"features": [{"name": "reponame", "dtype": "string"}, {"name": "filepath", "dtype": "string"}, {"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 79032, "num_examples": 18}], "download_size": 27713, "dataset_size": 79032}}
2023-06-06T06:22:43+00:00
b56c6ddbb53d54da97501fcee5873865f4dd9622
https://allenai.org/data/barda
tasksource/BarDA
[ "license:odc-by", "region:us" ]
2023-06-06T06:28:09+00:00
{"license": "odc-by"}
2023-06-06T06:28:43+00:00
02f727b45e7b066461ce519cafcf528d415b6b91
choisy/dataset
[ "license:mit", "region:us" ]
2023-06-06T06:37:48+00:00
{"license": "mit"}
2023-06-06T06:42:14+00:00
073031cdc192578a3855d8a20e6ceab1a67f8244
# Google Conceptual Captions in Vietnamese

This is the Vietnamese version of the Google Conceptual Captions dataset. It has more than 3.3 million image URLs with captions and was built using the Google Translate API. The Vietnamese version has exactly the same metadata as the English one; the only difference is the caption content. I provide both English and Vietnamese `.tsv` files.

For the English one, one can go to alternative sources:
- https://huggingface.co/datasets/conceptual_captions
- https://github.com/google-research-datasets/conceptual-captions

To download the dataset, one can use this tool:
- https://github.com/rom1504/img2dataset/blob/main/dataset_examples/cc3m.md

Or just iterate line by line (`caption<tab>url`); a reading sketch follows the note below.

⚠ Note:
- Some image URLs may die over time ([liuhaotian/LLaVA-CC3M-Pretrain-595K](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K) reported that 15% of the original dataset is inaccessible). I'm not responsible for them.
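A minimal line-by-line reading sketch; the local filename is illustrative, so substitute the actual `.tsv` file from this repository:

```python
# Hypothetical local filename; use the actual Vietnamese .tsv from this repo.
with open("gcc_train_vi.tsv", encoding="utf-8") as f:
    for line in f:
        # Each line is `caption<tab>url`; split once in case the URL has no tabs.
        caption, url = line.rstrip("\n").split("\t", 1)
        print(caption, url)
```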
dinhanhx/gcc-vi
[ "task_categories:image-to-text", "task_ids:image-captioning", "language:vi", "language:en", "license:unknown", "gcc", "gcc-vi", "Vietnamese", "region:us" ]
2023-06-06T06:39:18+00:00
{"language": ["vi", "en"], "license": "unknown", "task_categories": ["image-to-text"], "task_ids": ["image-captioning"], "pretty_name": "Google Conceptual Captions in Vietnamese", "source-datasets": ["google"], "tags": ["gcc", "gcc-vi", "Vietnamese"]}
2023-06-06T08:53:39+00:00
5528ccae32204b513c83843549a512f19822bca0
# moss-003-sft-data

This dataset can be used for Chinese multi-turn dialogue instruction fine-tuning; it contains 1.1 million Chinese and English multi-turn dialogues. The data comes from the moss-003-sft-data dataset of the [MOSS project](https://github.com/OpenLMLab/MOSS#%E6%95%B0%E6%8D%AE).

Starting from the original dataset, we removed redundant information, kept only the effective dialogue content, and adjusted the data format so that the data can be organized more flexibly during training. For more details, see the MOSS project documentation.

The dataset is in jsonl format; each line is one multi-turn dialogue, structured as follows:

```json
{
    "conversation_id":1,
    "category":"Brainstorming",
    "conversation":[
        {
            "human":"如何保障工作中遵循正确的安全准则?",
            "assistant":"为了保障工作中遵循正确的安全准则,以下是一些建议:\n\n1.了解相关安全规定和标准:了解相关的安全规定和标准,并遵守它们。这可以包括公司和政府的安全标准,以及行业标准和最佳实践。\n\n2.培训和教育:确保您和您的同事接受了必要的培训和教育,以了解正确的安全准则和行为。\n\n3.使用正确的工具和设备:确保您使用正确的工具和设备,并且它们得到了正确的维护和保养。\n\n4.个人防护装备:确保您和您的同事穿戴正确的个人防护装备,如安全鞋、透明眼镜或面罩、手套等。\n\n5.创建一个安全文化:鼓励个人对安全的责任感和意识,并创建一个安全文化,使人们始终关注他们自己和他人的安全。\n\n6.持续监测和改进:持续监测和改进安全准则和程序,以确保它们保持最新,并适应变化的情况。\n\n这些措施可以帮助您和您的同事在工作中保障正确的安全准则,确保您的工作场所是一个安全的环境。"
        },
        {
            "human":"如何应对突发紧急情况?",
            "assistant":"突发紧急情况可能会在任何时间发生,为了应对这些情况,以下是一些建议:\n\n1.保持冷静:保持冷静和清醒,以便能够更好地应对情况。\n\n2.评估情况:评估紧急情况的性质和范围,以便了解应该采取哪些行动。\n\n3.触发应急计划:如果您的机构或公司有应急计划,立即触发它,并按照计划的步骤行动。\n\n4.联系相关人员:通知必要的人员,例如紧急服务、管理层或其他相关人员,并与他们保持联系。\n\n5.遵循安全准则:在处理紧急情况时,请遵循正确的安全准则,确保自己和其他人的安全。\n\n6.提供及时信息:尽快向相关人员提供准确的信息,以便他们能够做出正确的决策。\n\n7.合作应对:与其他人合作应对紧急情况并相互支持。\n\n总之,应对突发紧急情况需要保持冷静和清醒,评估情况并采取适当的行动。同时,需要遵循正确的安全准则,并与其他人合作应对紧急情况。"
        },
        {
            "human":"在网络安全方面,如何避免遭受黑客攻击?",
            "assistant":"以下是一些避免遭受黑客攻击的建议:\n\n1.更新安全软件:确保您的操作系统、应用程序和安全软件是最新的,并随时更新它们。这可以帮助您及时发现和修复安全漏洞。\n\n2.使用强密码:使用强密码,包括字母、数字和特殊字符。不要使用容易猜测的密码,如生日、姓名或常用单词。\n\n3.使用多因素身份验证:使用多因素身份验证来增加账户的安全性,例如发送短信验证码或使用指纹识别。\n\n4.小心附件和链接:不要打开未知的附件或链接,因为它们可能包含恶意软件。\n\n5.备份重要文件:定期备份重要文件,并将其存储在安全的位置。\n\n6.保护个人信息:不要在互联网上公开个人信息,例如出生日期、地址或社交安全号码。\n\n7.使用加密连接:在访问敏感信息时,使用加密连接,如HTTPS。\n\n8.教育员工:向员工提供网络安全培训,并确保他们了解正确的安全措施和行为。\n\n总之,避免遭受黑客攻击需要采取多种措施,包括更新安全软件、使用强密码、使用多因素身份验证、小心附件和链接、备份重要文件、保护个人信息、使用加密连接以及教育员工。这些措施可以帮助您保护个人信息和数据免受黑客攻击。"
        },
        {
            "human":"如何确保网络连接的安全性?",
            "assistant":"以下是一些确保网络连接安全性的建议:\n\n1.使用加密连接:使用加密连接,如HTTPS,以确保数据传输过程中不被窃听或篡改。\n\n2.使用防病毒软件:安装和更新防病毒软件,可以帮助检测和清除恶意软件。\n\n3.使用防火墙:使用防火墙保护网络和设备,可以帮助阻止未经授权的访问和攻击。\n\n4.禁止未经授权的访问:禁止未经授权的访问,包括限制网络访问和设备访问权限。\n\n5.定期更新操作系统和应用程序:定期更新操作系统和应用程序,以修复安全漏洞和缺陷。\n\n6.使用强密码:使用强密码,包括字母、数字和特殊字符,并定期更改密码。\n\n7.控制访问:控制访问网络和设备的人员,仅授权人员可访问重要资源。\n\n8.教育员工:向员工提供网络安全培训,并确保他们了解正确的安全措施和行为。\n\n总之,确保网络连接的安全性需要采取多种措施,包括使用加密连接、使用防病毒软件、使用防火墙、禁止未经授权的访问、定期更新操作系统和应用程序、使用强密码、控制访问和教育员工。这些措施可以帮助您保护网络和设备免受未经授权的访问和攻击。"
        },
        {
            "human":"对于金融机构来说,如何保护客户的隐私和数据安全?",
            "assistant":"金融机构需要对客户的隐私和数据安全高度重视,以下是一些保护客户隐私和数据安全的建议:\n\n1.遵守合规要求:确保遵守所有适用的合规要求和监管标准,包括数据隐私法规和安全标准。\n\n2.加强身份验证:采用多因素身份验证,例如使用短信验证码、指纹识别等,以确保客户身份的真实性。\n\n3.加密敏感数据:使用加密技术对敏感数据进行加密,以保护客户信息不被未经授权的人员获得。\n\n4.建立安全控制:建立安全控制,包括访问控制、安全审计和安全事件响应,以保护客户信息的安全性。\n\n5.教育员工:向员工提供网络安全培训,并确保他们了解正确的安全措施和行为。\n\n6.定期测试:定期进行安全测试和漏洞扫描,以发现和修复潜在的安全漏洞。\n\n7.建立应急计划:建立应急计划,并对其进行定期测试,以确保在紧急情况下能够及时响应和恢复。\n\n总之,保护客户隐私和数据安全需要采取多种措施,包括遵守合规要求、加强身份验证、加密敏感数据、建立安全控制、教育员工、定期测试和建立应急计划。这些措施可以帮助金融机构保护客户信息的安全性和隐私。"
        }
    ]
}
```
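A minimal reading sketch for this format (the local filename is illustrative; field names follow the structure above):

```python
import json

# Hypothetical local filename; substitute the actual .jsonl file from this repo.
with open("moss-003-sft-data.jsonl", encoding="utf-8") as f:
    for line in f:
        dialogue = json.loads(line)  # one multi-turn dialogue per line
        for turn in dialogue["conversation"]:
            human, assistant = turn["human"], turn["assistant"]
            # assemble (human, assistant) turn pairs into training samples here
```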
YeungNLP/moss-003-sft-data
[ "region:us" ]
2023-06-06T06:45:41+00:00
{}
2023-06-09T09:55:57+00:00
e08dad53a6724d0ece986aba8a33b0e3390ac1aa
# Dataset Card for "socce_report_analysis" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ParsaKgvr/socce_report_analysis
[ "region:us" ]
2023-06-06T06:48:05+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "sent0", "dtype": "string"}, {"name": "sent1", "dtype": "string"}, {"name": "sent2", "dtype": "string"}, {"name": "sent3", "dtype": "string"}, {"name": "sent4", "dtype": "string"}, {"name": "sent5", "dtype": "string"}, {"name": "sent6", "dtype": "string"}, {"name": "sent7", "dtype": "string"}, {"name": "sent8", "dtype": "string"}, {"name": "sent9", "dtype": "string"}, {"name": "sent10", "dtype": "string"}, {"name": "sent11", "dtype": "string"}, {"name": "sent12", "dtype": "string"}, {"name": "sent13", "dtype": "string"}, {"name": "sent14", "dtype": "string"}, {"name": "sent15", "dtype": "string"}, {"name": "sent16", "dtype": "string"}, {"name": "sent17", "dtype": "string"}, {"name": "sent18", "dtype": "string"}, {"name": "sent19", "dtype": "string"}, {"name": "sent20", "dtype": "string"}, {"name": "sent21", "dtype": "string"}, {"name": "sent22", "dtype": "string"}, {"name": "sent23", "dtype": "string"}, {"name": "sent24", "dtype": "string"}, {"name": "sent25", "dtype": "string"}, {"name": "sent26", "dtype": "string"}, {"name": "sent27", "dtype": "string"}, {"name": "sent28", "dtype": "string"}, {"name": "sent29", "dtype": "string"}, {"name": "sent30", "dtype": "string"}, {"name": "sent31", "dtype": "string"}, {"name": "sent32", "dtype": "string"}, {"name": "sent33", "dtype": "string"}, {"name": "sent34", "dtype": "string"}, {"name": "sent35", "dtype": "string"}, {"name": "sent36", "dtype": "string"}, {"name": "sent37", "dtype": "string"}, {"name": "sent38", "dtype": "string"}, {"name": "sent39", "dtype": "string"}, {"name": "sent40", "dtype": "string"}, {"name": "sent41", "dtype": "string"}, {"name": "sent42", "dtype": "string"}, {"name": "sent43", "dtype": "string"}, {"name": "sent44", "dtype": "string"}, {"name": "sent45", "dtype": "string"}, {"name": "sent46", "dtype": "string"}, {"name": "sent47", "dtype": "string"}, {"name": "sent48", "dtype": "string"}, {"name": "sent49", "dtype": "string"}, {"name": "sent50", "dtype": "string"}, {"name": "sent51", "dtype": "string"}, {"name": "sent52", "dtype": "string"}, {"name": "sent53", "dtype": "string"}, {"name": "sent54", "dtype": "string"}, {"name": "sent55", "dtype": "string"}, {"name": "sent56", "dtype": "string"}, {"name": "sent57", "dtype": "string"}, {"name": "sent58", "dtype": "string"}, {"name": "sent59", "dtype": "string"}, {"name": "sent60", "dtype": "string"}, {"name": "sent61", "dtype": "string"}, {"name": "sent62", "dtype": "string"}, {"name": "sent63", "dtype": "string"}, {"name": "sent64", "dtype": "string"}, {"name": "sent65", "dtype": "string"}, {"name": "sent66", "dtype": "string"}, {"name": "sent67", "dtype": "string"}, {"name": "sent68", "dtype": "string"}, {"name": "sent69", "dtype": "string"}, {"name": "sent70", "dtype": "string"}, {"name": "sent71", "dtype": "string"}, {"name": "sent72", "dtype": "string"}, {"name": "sent73", "dtype": "string"}, {"name": "sent74", "dtype": "string"}, {"name": "sent75", "dtype": "string"}, {"name": "sent76", "dtype": "string"}, {"name": "sent77", "dtype": "string"}, {"name": "sent78", "dtype": "string"}, {"name": "sent79", "dtype": "string"}, {"name": "sent80", "dtype": "string"}, {"name": "sent81", "dtype": "string"}, {"name": "sent82", "dtype": "string"}, {"name": "sent83", "dtype": "string"}, {"name": "sent84", "dtype": "string"}, {"name": "sent85", "dtype": "string"}, {"name": "sent86", "dtype": "string"}, {"name": "sent87", "dtype": "string"}, {"name": "sent88", "dtype": "string"}, {"name": "sent89", 
"dtype": "string"}, {"name": "sent90", "dtype": "string"}, {"name": "sent91", "dtype": "string"}, {"name": "sent92", "dtype": "string"}, {"name": "sent93", "dtype": "string"}, {"name": "sent94", "dtype": "string"}, {"name": "sent95", "dtype": "string"}, {"name": "sent96", "dtype": "string"}, {"name": "sent97", "dtype": "string"}, {"name": "sent98", "dtype": "string"}, {"name": "sent99", "dtype": "string"}, {"name": "sent100", "dtype": "string"}, {"name": "sent101", "dtype": "string"}, {"name": "sent102", "dtype": "string"}, {"name": "sent103", "dtype": "string"}, {"name": "sent104", "dtype": "string"}, {"name": "sent105", "dtype": "string"}, {"name": "sent106", "dtype": "string"}, {"name": "sent107", "dtype": "string"}, {"name": "sent108", "dtype": "string"}, {"name": "sent109", "dtype": "string"}, {"name": "sent110", "dtype": "string"}, {"name": "sent111", "dtype": "string"}, {"name": "sent112", "dtype": "string"}, {"name": "sent113", "dtype": "string"}, {"name": "sent114", "dtype": "string"}, {"name": "sent115", "dtype": "string"}, {"name": "sent116", "dtype": "string"}, {"name": "sent117", "dtype": "string"}, {"name": "sent118", "dtype": "string"}, {"name": "sent119", "dtype": "string"}, {"name": "sent120", "dtype": "string"}, {"name": "sent121", "dtype": "string"}, {"name": "sent122", "dtype": "string"}, {"name": "sent123", "dtype": "string"}, {"name": "sent124", "dtype": "string"}, {"name": "sent125", "dtype": "string"}, {"name": "sent126", "dtype": "string"}, {"name": "sent127", "dtype": "string"}, {"name": "sent128", "dtype": "string"}, {"name": "sent129", "dtype": "string"}, {"name": "sent130", "dtype": "string"}, {"name": "sent131", "dtype": "string"}, {"name": "sent132", "dtype": "string"}, {"name": "sent133", "dtype": "string"}, {"name": "sent134", "dtype": "string"}, {"name": "sent135", "dtype": "string"}, {"name": "sent136", "dtype": "string"}, {"name": "player0", "dtype": "string"}, {"name": "rating0", "dtype": "string"}, {"name": "player1", "dtype": "string"}, {"name": "rating1", "dtype": "string"}, {"name": "player2", "dtype": "string"}, {"name": "rating2", "dtype": "string"}, {"name": "player3", "dtype": "string"}, {"name": "rating3", "dtype": "string"}, {"name": "player4", "dtype": "string"}, {"name": "rating4", "dtype": "string"}, {"name": "player5", "dtype": "string"}, {"name": "rating5", "dtype": "string"}, {"name": "player6", "dtype": "string"}, {"name": "rating6", "dtype": "string"}, {"name": "player7", "dtype": "string"}, {"name": "rating7", "dtype": "string"}, {"name": "player8", "dtype": "string"}, {"name": "rating8", "dtype": "string"}, {"name": "player9", "dtype": "string"}, {"name": "rating9", "dtype": "string"}, {"name": "player10", "dtype": "string"}, {"name": "rating10", "dtype": "string"}, {"name": "player11", "dtype": "string"}, {"name": "rating11", "dtype": "string"}, {"name": "player12", "dtype": "string"}, {"name": "rating12", "dtype": "string"}, {"name": "player13", "dtype": "string"}, {"name": "rating13", "dtype": "string"}, {"name": "player14", "dtype": "string"}, {"name": "rating14", "dtype": "string"}, {"name": "player15", "dtype": "string"}, {"name": "rating15", "dtype": "string"}, {"name": "player16", "dtype": "string"}, {"name": "rating16", "dtype": "string"}, {"name": "player17", "dtype": "string"}, {"name": "rating17", "dtype": "string"}, {"name": "player18", "dtype": "string"}, {"name": "rating18", "dtype": "string"}, {"name": "player19", "dtype": "string"}, {"name": "rating19", "dtype": "string"}, {"name": "player20", "dtype": "string"}, 
{"name": "rating20", "dtype": "string"}, {"name": "player21", "dtype": "string"}, {"name": "rating21", "dtype": "string"}, {"name": "player22", "dtype": "string"}, {"name": "rating22", "dtype": "string"}, {"name": "player23", "dtype": "string"}, {"name": "rating23", "dtype": "string"}, {"name": "player24", "dtype": "string"}, {"name": "rating24", "dtype": "string"}, {"name": "player25", "dtype": "string"}, {"name": "rating25", "dtype": "string"}, {"name": "player26", "dtype": "string"}, {"name": "rating26", "dtype": "string"}, {"name": "player27", "dtype": "string"}, {"name": "rating27", "dtype": "string"}, {"name": "player28", "dtype": "string"}, {"name": "rating28", "dtype": "string"}, {"name": "player29", "dtype": "string"}, {"name": "rating29", "dtype": "string"}, {"name": "player30", "dtype": "string"}, {"name": "rating30", "dtype": "string"}, {"name": "player31", "dtype": "string"}, {"name": "rating31", "dtype": "string"}, {"name": "player32", "dtype": "string"}, {"name": "rating32", "dtype": "string"}, {"name": "player33", "dtype": "string"}, {"name": "rating33", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13072462, "num_examples": 1996}], "download_size": 6901926, "dataset_size": 13072462}}
2023-06-06T07:09:18+00:00
9393535b3a5369a4e8919cb70c975e1ab444f93c
Babak-Behkamkia/Personality_Detection
[ "license:mit", "region:us" ]
2023-06-06T06:53:55+00:00
{"license": "mit"}
2023-06-06T07:07:12+00:00
14f666f653da72b66783750a14e5db476647ad40
# Dataset Card for "github-lll" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
erberry/github-lll
[ "region:us" ]
2023-06-06T06:58:01+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "fname", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 72915548, "num_examples": 156824}], "download_size": 26657194, "dataset_size": 72915548}}
2023-06-15T02:13:30+00:00
f8cb6a158305bd431663bc816dfd5eac6ba803f5
# Dataset Card for "minipile_512" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ZelaAI/minipile_512
[ "region:us" ]
2023-06-06T07:11:49+00:00
{"dataset_info": {"features": [{"name": "tokens", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 11940149436, "num_examples": 2907859}], "download_size": 3106480852, "dataset_size": 11940149436}}
2023-06-06T08:52:39+00:00
3820b686d2705441857b6e8b7bb087da91c7d13a
# Dataset Card for "ohjeet" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Amselco/ohjeet
[ "region:us" ]
2023-06-06T07:13:12+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "ohje", "dtype": "string"}, {"name": "konteksti", "dtype": "string"}, {"name": "vastaus", "dtype": "string"}, {"name": "kategoria", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 9021406, "num_examples": 14075}], "download_size": 6027164, "dataset_size": 9021406}}
2023-06-11T15:59:02+00:00
197f5eaeed026418253ef07c0c4fea84338637c4
jcaiaq/lm
[ "region:us" ]
2023-06-06T07:23:56+00:00
{}
2023-06-06T13:47:35+00:00
b871815c51d257745cda3e032ab1654f967bfafc
# Dataset Card for llm-book/aio-passages-bert-base-japanese-v3-bpr

This dataset, used in the book 『大規模言語モデル入門』 (Introduction to Large Language Models), applies BPR passage embeddings to the passage dataset of the 「AI王」 (AI King) competition. On top of the [llm-book/aio-passages](https://huggingface.co/datasets/llm-book/aio-passages) dataset, binary passage vectors produced by [llm-book/bert-base-japanese-v3-bpr-passage-encoder](https://huggingface.co/llm-book/bert-base-japanese-v3-bpr-passage-encoder) are added in the `embeddings` field.

## Licence

The Wikipedia content used in this dataset is distributed under the [Creative Commons Attribution-ShareAlike 3.0 license (CC BY-SA 3.0)](https://creativecommons.org/licenses/by-sa/3.0/deed.ja) and the [GNU Free Documentation License (GFDL)](https://www.gnu.org/licenses/fdl.html).
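A minimal usage sketch, assuming each `embeddings` entry packs the BPR binary vector into `uint8` bytes (as the `sequence: uint8` feature suggests):

```python
import numpy as np
from datasets import load_dataset

ds = load_dataset("llm-book/aio-passages-bpr-bert-base-japanese-v3", split="train")

row = ds[0]
packed = np.asarray(row["embeddings"], dtype=np.uint8)
bits = np.unpackbits(packed)  # assumption: 8 binary dimensions per byte
print(row["title"], bits.shape)
```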
llm-book/aio-passages-bpr-bert-base-japanese-v3
[ "size_categories:1M<n<10M", "language:ja", "license:cc-by-sa-3.0", "license:gfdl", "region:us" ]
2023-06-06T07:24:36+00:00
{"language": ["ja"], "license": ["cc-by-sa-3.0", "gfdl"], "size_categories": ["1M<n<10M"], "dataset_info": {"features": [{"name": "id", "dtype": "int32"}, {"name": "pageid", "dtype": "int32"}, {"name": "revid", "dtype": "int32"}, {"name": "text", "dtype": "string"}, {"name": "section", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "embeddings", "sequence": "uint8"}], "splits": [{"name": "train", "num_bytes": 3483313719, "num_examples": 4288198}], "download_size": 2160522807, "dataset_size": 3483313719}}
2023-06-30T09:30:40+00:00
1eaeffe6c2982293050d4868d7dcb9f2318892eb
# DragonFire0159x/nijijourney-images

Dataset with images generated by niji-journey. Contains only images, no prompts.

# What's in the repository

The repository contains archives of different dataset sizes; for example, the niji_dataset_404.zip archive contains 404 pictures. You can also use the images to fine-tune Stable Diffusion.
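A minimal sketch for fetching and unpacking one of the archives with `huggingface_hub` (assuming the archive named in this card sits at the repository root):

```python
import zipfile
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="DragonFire0159x/nijijourney-images",
    filename="niji_dataset_404.zip",  # archive name taken from this card
    repo_type="dataset",
)
with zipfile.ZipFile(path) as zf:
    zf.extractall("niji_images")  # extracts the 404 images
```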
DragonFire0159x/nijijourney-images
[ "task_categories:unconditional-image-generation", "size_categories:n<1K", "region:us" ]
2023-06-06T07:35:30+00:00
{"size_categories": ["n<1K"], "task_categories": ["unconditional-image-generation"]}
2023-06-06T08:23:43+00:00
d1ea7654218aa705eb46bc684f573e07289db342
# Dataset Card for "ine-dataset-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
camenduru/ine-dataset-test
[ "region:us" ]
2023-06-06T07:37:13+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 105057652.0, "num_examples": 369}], "download_size": 103726194, "dataset_size": 105057652.0}}
2023-06-06T07:37:28+00:00
94bf52cd3c99f61a493be15c0abe2ebb3ba12ce2
a. 997 b. 857 c. 649 d. 552 e. 783
graspen/odev
[ "region:us" ]
2023-06-06T08:10:19+00:00
{}
2023-06-06T08:10:43+00:00
ad18324b983a99545d17804199172f6c74f19d38
# Outdoor Garbage Dataset

The dataset consists of images of garbage cans of various capacities and types. It is well suited to training a neural network to monitor the timely removal of garbage and to organize the logistics of garbage-collection vehicles. The dataset is useful for recommendation systems and for optimizing and automating the work of community services in a smart city.

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F618942%2Fdc8b0574545e4e7963b06f6c47ce4a8e%2F(1).png?generation=1686047397390850&alt=media)

# Get the dataset

### This is just an example of the data

Leave a request on [**https://trainingdata.pro/data-market**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=outdoor_garbage) to discuss your requirements, learn about the price and buy the dataset.

# Content

The dataset includes 10,000 images of trash cans:
- at different times of day
- in different weather conditions

## Types of garbage can capacity

- **is_full** - at least one of the trash cans shown in the photo is completely full. This type includes cans filled to the top or overflowing.
- **is_empty** - the garbage cans have free space; they can be half full or completely empty.
- **is_scattered** - this tag is added together with is_empty or is_full. It means that garbage (volumetric garbage bags or building waste, but not single items) is scattered nearby.

# Data Format

Each image in the `img` folder is accompanied by an XML annotation in the `annotations.xml` file indicating the labeled garbage can capacity types for each image in the dataset (see the parsing sketch below).

# Example of XML file structure

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F618942%2F23d24b8378ac2db06e71e57ecb3d13ac%2Fcarbon%20(5).png?generation=1686076026295933&alt=media)

## [**TrainingData**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=outdoor_garbage) provides high-quality data annotation tailored to your needs

More datasets in TrainingData's Kaggle account: **https://www.kaggle.com/trainingdatapro/datasets**

TrainingData's GitHub: **https://github.com/Trainingdata-datamarket/TrainingData_All_datasets**
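The XML schema above is only shown as an image, so the element and attribute names in this sketch are assumptions (a CVAT-style layout):

```python
import xml.etree.ElementTree as ET

tree = ET.parse("annotations.xml")
# Assumed CVAT-style layout: one <image> element per photo,
# with image-level <tag label="..."> children for the capacity types.
for image in tree.getroot().iter("image"):
    labels = [t.get("label") for t in image.iter("tag")]
    print(image.get("name"), labels)  # e.g. can_001.jpg ['is_full', 'is_scattered']
```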
TrainingDataPro/outdoor_garbage
[ "task_categories:image-classification", "language:en", "license:cc-by-nd-4.0", "code", "region:us" ]
2023-06-06T08:29:54+00:00
{"language": ["en"], "license": "cc-by-nd-4.0", "task_categories": ["image-classification"], "tags": ["code"], "dataset_info": {"features": [{"name": "image_id", "dtype": "int32"}, {"name": "image", "dtype": "image"}, {"name": "annotations", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 608467996, "num_examples": 100}], "download_size": 607803398, "dataset_size": 608467996}}
2023-09-14T15:52:34+00:00
601abf1a8a103e0f46a1f3761d22bfa71540909c
# translation_models
GeorgeSpark/translationmodels
[ "region:us" ]
2023-06-06T08:37:27+00:00
{}
2023-06-06T08:44:39+00:00
baa5e5cc72078352b599c302c97ee2ae1c72c417
# Docstring to code data

## Dataset Summary

This dataset contains pairs of English text and code from multiple programming language pairs. Namely, text is paired with code snippets for Python, Java, JavaScript, and Go. The data is curated via an automated filtering pipeline from source files within [The Stack](https://huggingface.co/datasets/bigcode/the-stack).

## Supported Tasks

This dataset can be used to fine-tune models for code-to-text and/or text-to-code tasks, in both information retrieval and conditional generation settings.

## Splits

```python
DATA_SPLITS = {"python_gh", "java_gh", "javascript_gh", "go_gh"}
```

## How to get the data with a given programming language

```python
from datasets import load_dataset

def get_dataset(prog_lang):
    test_data = load_dataset("blindsubmissions/GH_text2code", split=prog_lang)
    return test_data
```

## Dataset Structure

### Data Instances

Each data instance corresponds to a function/method occurring in licensed files that compose The Stack, i.e., files with permissive licenses collected from GitHub.

### Relevant Data Fields

- identifier (string): Function/method name.
- parameters (string): Function parameters.
- return_statement (string): Return statement if found during parsing.
- docstring (string): Complete docstring content.
- docstring_summary (string): Summary/processed docstring dropping args and return statements.
- function (string): Actual function/method content.
- argument_list (null): List of arguments.
- language (string): Programming language of the function.
- type (string): Return type if found during parsing.

## Summary of data curation pipeline

- Filtering out repositories that appear in [CodeSearchNet](https://huggingface.co/datasets/code_search_net).
- Filtering the files that belong to the programming languages of interest.
- Pre-filtering the files that likely contain text in the natural languages of interest.
- AST parsing with [Tree-sitter](https://tree-sitter.github.io/tree-sitter/).
- Performing language identification of docstrings in the resulting set of functions/methods and selecting the ones classified as English via majority voting.

## Social Impact of the dataset

This dataset is released with the aim of increasing the availability of training data for the NLP-for-code research community by providing text/code paired data. We expect this data to help enable more accurate information retrieval systems and text-to-code or code-to-text summarization. As a subset of The Stack, this dataset inherits the de-risking efforts carried out when that dataset was built, though we note that risks remain and the data could be used maliciously, for instance to aid in the creation of malicious code. We highlight, however, that this is a risk shared by any code dataset made openly available. Moreover, we remark that the data may contain harmful or offensive language, which could be learned by models trained on it.

## Discussion of Biases

The data is collected from GitHub and naturally occurring text on that platform. As a consequence, certain languages are more or less likely to contain well-documented code, and the resulting data will not be uniformly represented in terms of programming languages.

## Known limitations

The dataset can be expanded to further improve its coverage. Moreover, we use text naturally occurring as comments or docstrings rather than human annotations. As such, the resulting data has high variance in quality, depending on the practices of sub-communities of software developers. However, we remark that the task our evaluation dataset defines is reflective of what searching a real codebase would look like. Finally, we note that some imbalance in the data is observed for the same reason, since certain languages are more or less likely to contain well-documented code.

## Maintenance plan

The data will be kept up to date by following The Stack releases. We will rerun our pipeline for every new release and add non-overlapping new content to both training and testing partitions of our data, so that we carry over opt-out updates and include fresh repos.

## Update plan

- Cover all 6 programming languages from CodeSearchNet.

## Licensing Information

M2CRB is a subset filtered and pre-processed from [The Stack](https://huggingface.co/datasets/bigcode/the-stack), a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in M2CRB must abide by the terms of the original licenses.
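As a quick usage example with the loader defined above (an illustrative sketch; field names follow the schema in this card):

```python
data = get_dataset("python_gh")  # one of DATA_SPLITS

sample = data[0]
print(sample["identifier"])         # function/method name
print(sample["docstring_summary"])  # processed docstring
print(sample["function"][:200])     # start of the paired code
```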
blindsubmissions/GH_text2code
[ "task_categories:translation", "task_categories:summarization", "task_categories:text2text-generation", "size_categories:10M<n<100M", "language:en", "code", "region:us" ]
2023-06-06T08:39:32+00:00
{"language": ["en"], "size_categories": ["10M<n<100M"], "task_categories": ["translation", "summarization", "text2text-generation"], "dataset_info": {"features": [{"name": "identifier", "dtype": "string"}, {"name": "parameters", "dtype": "string"}, {"name": "docstring", "dtype": "string"}, {"name": "docstring_summary", "dtype": "string"}, {"name": "function", "dtype": "string"}, {"name": "function_tokens", "sequence": "string"}, {"name": "start_point", "sequence": "int64"}, {"name": "end_point", "sequence": "int64"}, {"name": "language", "dtype": "string"}, {"name": "docstring_language", "dtype": "string"}, {"name": "docstring_language_predictions", "dtype": "string"}, {"name": "is_langid_reliable", "dtype": "string"}], "splits": [{"name": "python_gh", "num_bytes": 36300760423, "num_examples": 15000002}, {"name": "java_gh", "num_bytes": 21613057110, "num_examples": 15000014}, {"name": "go_gh", "num_bytes": 22559741937, "num_examples": 15000078}, {"name": "javascript_gh", "num_bytes": 3895688311, "num_examples": 2000040}], "download_size": 166324499, "dataset_size": 84369247781}, "tags": ["code"]}
2023-08-08T15:29:57+00:00
5610d0dbad8a420dcf183bbdf00161170ba019a5
# Dataset of bald people

The dataset consists of 5,000 photos of people at the 7 stages of hair loss according to the Norwood scale. It is useful for training neural networks for recommendation systems, for optimizing the work processes of trichologists, and for applications in the med/beauty spheres.

# Get the dataset

### This is just an example of the data

Leave a request on [**https://trainingdata.pro/data-market**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=bald_classification) to discuss your requirements, learn about the price and buy the dataset.

# Image

Similar images are presented in the dataset:

![](https://sun9-9.userapi.com/impg/pS79DM0yYyVS1EhPNczJm84cCFa2kViOqHZzTg/GRxNq7_nc_o.jpg?size=2560x1004&quality=95&sign=2e5dac432dd8dfe8191b2828aaeec9cd&type=album)

# Hamilton–Norwood scale

- **type_1**: There is a lack of bilateral recessions along the anterior border of the hairline in the frontoparietal regions. No notable hair loss or recession of the hairline.
- **type_2**: There is a small recession of the hairline around the temples. Hair is also lost, or sparse, along the midfrontal border of the scalp, but the depth of the affected area is much less than in the frontoparietal regions. This is commonly referred to as an adult or mature hairline.
- **type_3**: The first signs of significant balding appear. There is a deep, symmetrical recession at the temples that is only sparsely covered by hair.
- **type_4**: The hairline recession is harsher than in stage 2, and there is scattered hair or no hair on the vertex. There are deep frontotemporal recessions, usually symmetrical, that are either bare or very sparsely covered by hair.
- **type_5**: The areas of hair loss are more significant than in stage 4. They are still divided, but the band of hair between them is thinner and sparser.
- **type_6**: The connection of hair that crosses the crown is gone, with only sparse hair remaining. The frontotemporal and vertex regions are joined together, and the extent of hair loss is more significant.
- **type_7**: The most drastic stage of hair loss; only a band of hair going around the sides of the head persists. This hair usually is not thick and might be dainty.

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F618942%2F9ecea1768d64ac8444d74ad3c068160b%2F.png?generation=1686041157081408&alt=media)

# Data Format

Each image in the `img` folder is accompanied by an XML annotation in the `annotations.xml` file indicating the Hamilton–Norwood type of hair loss for each person in the dataset (see the loading sketch below).

# Example of XML file structure

![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F618942%2F65fe0cc71ef8761886faf32a392f6a58%2F.png?generation=1686042340119038&alt=media)

## [**TrainingData**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=bald_classification) provides high-quality data annotation tailored to your needs

More datasets in TrainingData's Kaggle account: **https://www.kaggle.com/trainingdatapro/datasets**

TrainingData's GitHub: **https://github.com/Trainingdata-datamarket/TrainingData_All_datasets**
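A minimal loading sketch with the `datasets` library (feature names taken from this repository's metadata; treat it as illustrative):

```python
from datasets import load_dataset

ds = load_dataset("TrainingDataPro/bald_classification", split="train")

sample = ds[0]
sample["image"].show()        # PIL image of the person
print(sample["annotations"])  # XML annotation string with the Norwood type
```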
TrainingDataPro/bald_classification
[ "task_categories:image-classification", "language:en", "license:cc-by-nd-4.0", "code", "region:us" ]
2023-06-06T09:09:26+00:00
{"language": ["en"], "license": "cc-by-nd-4.0", "task_categories": ["image-classification"], "tags": ["code"], "dataset_info": {"features": [{"name": "image_id", "dtype": "int32"}, {"name": "image", "dtype": "image"}, {"name": "annotations", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 183544614, "num_examples": 42}], "download_size": 183335948, "dataset_size": 183544614}}
2023-09-14T15:53:12+00:00
74e228093a85d09b0f987f9711ad1809ac689293
# Dataset Card for "Salesbot" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Amirkid/Salesbot
[ "region:us" ]
2023-06-06T09:12:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 191868, "num_examples": 960}], "download_size": 0, "dataset_size": 191868}}
2023-06-06T09:21:38+00:00
78d62b50e49e94c8abe60c7d0fcf30c89c2b172f
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
Idrizorg/WER_Evaluation_For_TTS
[ "task_categories:text-to-speech", "language:en", "region:us" ]
2023-06-06T09:34:50+00:00
{"language": ["en"], "task_categories": ["text-to-speech"], "pretty_name": "SOMOS"}
2023-06-06T15:06:07+00:00
5eb31f34c36afe477e03644242057ab276895876
# Dataset Card for "cnn_dailymail_azure_pt_pt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arubenruben/cnn_dailymail_azure_pt_pt
[ "task_categories:summarization", "task_categories:translation", "language:pt", "Machine Translation", "region:us" ]
2023-06-06T10:02:22+00:00
{"language": ["pt"], "task_categories": ["summarization", "translation"], "pretty_name": "Portuguese CNN-Dailymail-Azure", "dataset_info": {"features": [{"name": "document", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33317736, "num_examples": 7729}, {"name": "validation", "num_bytes": 14690610, "num_examples": 3810}, {"name": "test", "num_bytes": 33051715, "num_examples": 7298}], "download_size": 48224108, "dataset_size": 81060061}, "tags": ["Machine Translation"]}
2023-06-06T10:08:32+00:00
f2191973957257e327e18fa5a6f675de052fa950
# Dataset Card for "cnn_dailymail_google_translator" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arubenruben/cnn_dailymail_google_translator
[ "task_categories:summarization", "task_categories:translation", "language:pt", "Machine Translation", "region:us" ]
2023-06-06T10:02:38+00:00
{"language": ["pt"], "task_categories": ["summarization", "translation"], "pretty_name": "Portuguese CNN-Dailymail-Google", "dataset_info": {"features": [{"name": "document", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43257744, "num_examples": 10000}, {"name": "validation", "num_bytes": 19194954, "num_examples": 5000}, {"name": "test", "num_bytes": 45552717, "num_examples": 10000}], "download_size": 64070699, "dataset_size": 108005415}, "tags": ["Machine Translation"]}
2023-06-06T10:07:14+00:00
9fba6886f8d4af31c8d7ccd4103988ee6c490de5
# COCO 2017 image captions in Vietnamese The dataset is first introduced in [dinhanhx/VisualRoBERTa](https://github.com/dinhanhx/VisualRoBERTa/tree/main). I use VinAI tools to translate [COCO 2017 image captions](https://cocodataset.org/#download) (2017 Train/Val annotations) from English to Vietnamese. Then I merge the [UIT-ViIC](https://arxiv.org/abs/2002.00175) dataset into it. To load the dataset, one can take a look at [this code in VisualRoBERTa](https://github.com/dinhanhx/VisualRoBERTa/blob/main/src/data.py#L22-L100) or [this code in Velvet](https://github.com/dinhanhx/velvet/blob/main/velvet/dataset/coco.py). I provide both the English originals and the Vietnamese versions (including UIT-ViIC). ⚠ Note: - The UIT-ViIC splits originate from `en/captions_train2017.json`. Therefore, I combine all UIT-ViIC splits and merge them into `vi/captions_train2017_trans.json`. As a result, I get `captions_train2017_trans_plus.json`. - `vi/captions_train2017_trans.json` and `vi/captions_val2017_trans.json` are VinAI-translated from the ones in `en/`.
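A minimal loading sketch under the file layout described in the note above; the paths mirror the folder names mentioned there, and the JSON is assumed to follow the standard COCO captions schema:

```python
import json

# Standard COCO captions schema: {"images": [...], "annotations": [...]}
with open("vi/captions_train2017_trans_plus.json", encoding="utf-8") as f:
    data = json.load(f)

# Map image_id -> file_name, then pair each Vietnamese caption with its image.
id_to_file = {img["id"]: img["file_name"] for img in data["images"]}
pairs = [(id_to_file[ann["image_id"]], ann["caption"]) for ann in data["annotations"]]
print(pairs[0])
```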
dinhanhx/coco-2017-vi
[ "task_categories:image-to-text", "task_ids:image-captioning", "language:vi", "language:en", "license:unknown", "coco", "coco-2017-vi", "Vietnamese", "arxiv:2002.00175", "region:us" ]
2023-06-06T10:02:46+00:00
{"language": ["vi", "en"], "license": "unknown", "task_categories": ["image-to-text"], "task_ids": ["image-captioning"], "pretty_name": "COCO 2017 image captions in Vietnamese", "source-datasets": ["ms coco"], "tags": ["coco", "coco-2017-vi", "Vietnamese"]}
2023-11-09T09:03:38+00:00
b36d466bf27035361a19bbdb49081ce2278b491d
Original dataset: [JeanKaddour/minipile](https://huggingface.co/datasets/JeanKaddour/minipile) See the [Thought Tokens Repository](https://github.com/ZelaAI/thought-tokens) for a demonstration of streaming usage of this dataset and the specific implementation of how it was prepared. Tokenized with the GPTNeoX tokenizer and split into sequences of length 513, so that each sequence yields 512 input ids and 512 target ids shifted by one position.
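A sketch of the intended usage: stream the dataset and shift each length-513 sequence by one token to obtain the 512 input ids and 512 target ids (the `input_ids` column name is an assumption):

```python
from datasets import load_dataset

# Stream so the tokenized corpus never has to fit in memory.
ds = load_dataset("ZelaAI/minipile_512_streamable", split="train", streaming=True)

for example in ds:
    tokens = example["input_ids"]  # assumed column name; length 513
    inputs = tokens[:-1]           # 512 input ids
    targets = tokens[1:]           # 512 target ids, shifted by one
    break
```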
ZelaAI/minipile_512_streamable
[ "task_categories:text-generation", "size_categories:1B<n<10B", "region:us" ]
2023-06-06T10:12:32+00:00
{"size_categories": ["1B<n<10B"], "task_categories": ["text-generation"], "pretty_name": "Streamable Minipile Tokenized"}
2023-06-06T11:09:21+00:00
3d659be20f854c6a3f1a2917cc486767dda10543
Venkatesh4342/NER_augmented_indian
[ "license:apache-2.0", "region:us" ]
2023-06-06T10:14:25+00:00
{"license": "apache-2.0"}
2023-06-06T10:15:11+00:00
89db62f7f93ba74bb0f73fd417cebd971f23859b
MetroCat/HEBREW-MIL-CLEAN
[ "license:gpl", "region:us" ]
2023-06-06T10:17:39+00:00
{"license": "gpl"}
2023-06-06T10:19:43+00:00
01609a8912da6b83b969f8856f2450bbd2f409c0
Rhivu/imgclassification
[ "license:bigscience-openrail-m", "region:us" ]
2023-06-06T10:31:09+00:00
{"license": "bigscience-openrail-m"}
2023-06-06T10:31:09+00:00
499cccb5ce773ca0e5b464082573223a76d9f22e
# Dataset Card for "augmented_images_perplexity" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rbeauchamp/augmented_images_perplexity
[ "region:us" ]
2023-06-06T10:35:37+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "image_path", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1000689543.812, "num_examples": 1052}], "download_size": 1001267552, "dataset_size": 1000689543.812}}
2023-06-06T10:36:11+00:00
aed8e89c77a5693bff3d77ab62a35654b35ca84b
# Dataset Card for "StellarX-4b-SWE2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dampish/StellarX-4b-SWE2
[ "region:us" ]
2023-06-06T10:36:17+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 5480326.0, "num_examples": 1000}], "download_size": 675495, "dataset_size": 5480326.0}}
2023-06-06T10:36:19+00:00
5ff93c7a9257690207525b479550ce0934b729a0
taln-ls2n/SSI
[ "task_categories:text-classification", "language:en", "region:us" ]
2023-06-06T10:39:10+00:00
{"language": ["en"], "task_categories": ["text-classification"]}
2023-06-06T10:49:21+00:00
410c7abc86fec94b09ae229931b621fe6e719df5
The data was generated by gpt-4, and therefore is subject to OpenAI ToS. The tool used to generate the data, [airoboros](https://github.com/jondurbin/airoboros), is apache-2. Specific areas of focus for this training data: * trivia * math * nonsensical math * coding * closed context question answering * closed context question answering, with multiple contexts to choose from as confounding factors * writing * multiple choice This is largely an overlap of the original [dataset](https://huggingface.co/datasets/jondurbin/airoboros-gpt4), but with a few extras: * fixed contextual entries that were missing closing tags (e.g. "ENDINPUT", "ENDINSTRUCTION", etc.) * fixed an issue where source information was provided even if not asked for (the model always tried to provide source info) * added some questions that were unrelated to the provided context, to train the model to say when it can't provide an answer * added several new contextual instructions, including some with FAQ style, to hopefully prevent questions in the context from breaking the inference * hundreds more coding samples, focusing primarily on python, java, javascript, c/c++, and golang ### Usage and License Notices All airoboros models and datasets are intended and licensed for research use only. I've used the 'cc-by-nc-4.0' license, but really it is subject to a custom/special license because: - the base model is LLaMa, which has its own special research license - the dataset(s) were generated with OpenAI (gpt-4 and/or gpt-3.5-turbo), which has a clause saying the data can't be used to create models that compete with OpenAI So, to reiterate: this model (and datasets) cannot be used commercially.
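For illustration, a contextual entry with its closing tags (the tags mentioned in the fixes above) looks roughly like the following; the delimiter names come from the airoboros repository, and the content is invented, so treat this as a sketch rather than a spec:

```python
# Sketch of a closed-context instruction in the airoboros delimiter style.
# BEGININPUT/ENDINPUT and BEGININSTRUCTION/ENDINSTRUCTION are the closing-tag
# pairs referenced above; the date and question are made up for illustration.
prompt = """BEGININPUT
BEGINCONTEXT
date: 2023-06-01
ENDCONTEXT
The city council voted 7-2 to approve the new park budget.
ENDINPUT
BEGININSTRUCTION
How did the city council vote on the park budget?
ENDINSTRUCTION"""
print(prompt)
```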
jondurbin/airoboros-gpt4-1.1
[ "license:cc-by-nc-4.0", "region:us" ]
2023-06-06T10:50:04+00:00
{"license": "cc-by-nc-4.0"}
2023-06-22T14:00:56+00:00
c7ec6587b1d9cfa0739ef1087daaae2ae44c5918
# Dataset Card for "counterfact-lm-neg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
derpyplops/counterfact-lm-neg
[ "region:us" ]
2023-06-06T11:20:21+00:00
{"dataset_info": {"features": [{"name": "relation_id", "dtype": "int64"}, {"name": "original_prompt", "dtype": "string"}, {"name": "original_target_true", "dtype": "string"}, {"name": "original_target_false", "dtype": "string"}, {"name": "inverted_prompt_true", "dtype": "string"}, {"name": "inverted_prompt_false", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 409918.5, "num_examples": 2461}, {"name": "test", "num_bytes": 409918.5, "num_examples": 2461}], "download_size": 480194, "dataset_size": 819837.0}}
2023-06-06T11:31:41+00:00
c1c9527f0101a11bb419d9ea2b1b9385ce95d7f7
# Dataset Card for "llm-pdf-chunks-qa-mix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
prognosis/llm-pdf-chunks-qa-mix
[ "region:us" ]
2023-06-06T11:31:04+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 20498196.0, "num_examples": 2501}, {"name": "test", "num_bytes": 2278488.0, "num_examples": 278}], "download_size": 11071207, "dataset_size": 22776684.0}}
2023-06-06T11:31:24+00:00
0bcfdf2c75e2f61e4bf5e2cdb5aeb1a82ec201d3
# MedNERF ## Dataset Description - **Paper:** [Multilingual Clinical NER: Translation or Cross-lingual Transfer?](https://arxiv.org/abs/2306.04384) - **Point of Contact:** [email]([email protected]) ### Dataset Summary MedNERF is a French medical NER dataset whose aim is to serve as a test set for medical NER models. It has been built using a sample of French medical prescriptions annotated with the same guidelines as the [n2c2 dataset](https://academic.oup.com/jamia/article-abstract/27/1/3/5581277?redirectedFrom=fulltext&login=false). Entities are annotated with the following labels: `Drug`, `Strength`, `Form`, `Dosage`, `Duration` and `Frequency`, using the IOB format. ## Licensing Information This dataset is distributed under the Creative Commons Attribution Non Commercial Share Alike 4.0 license. ## Citation information ``` @inproceedings{mednerf, title = "Multilingual Clinical NER: Translation or Cross-lingual Transfer?", author = "Gaschi, Félix and Fontaine, Xavier and Rastin, Parisa and Toussaint, Yannick", booktitle = "Proceedings of the 5th Clinical Natural Language Processing Workshop", publisher = "Association for Computational Linguistics", year = "2023" } ```
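To illustrate the IOB scheme on the label set listed in the summary above (the tokens and tags below are invented for illustration, not taken from the dataset):

```python
# Invented prescription line tagged with the MedNERF label set in IOB format.
tokens = ["Doliprane", "500", "mg", "1", "comprimé", "3", "fois", "par", "jour"]
tags = ["B-Drug", "B-Strength", "I-Strength", "B-Dosage", "B-Form",
        "B-Frequency", "I-Frequency", "I-Frequency", "I-Frequency"]

for token, tag in zip(tokens, tags):
    print(f"{token}\t{tag}")
```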
Posos/MedNERF
[ "task_categories:token-classification", "size_categories:n<1K", "language:fr", "license:cc-by-nc-sa-4.0", "medical", "arxiv:2306.04384", "region:us" ]
2023-06-06T11:50:48+00:00
{"language": ["fr"], "license": "cc-by-nc-sa-4.0", "size_categories": ["n<1K"], "task_categories": ["token-classification"], "pretty_name": "MedNERF", "tags": ["medical"]}
2023-06-07T12:55:06+00:00
c004fe046d913565d353e68e2c37976d2c5120fb
mehmetkay-sudo/movies_set
[ "license:gpl-2.0", "region:us" ]
2023-06-06T12:09:03+00:00
{"license": "gpl-2.0"}
2023-06-10T13:11:34+00:00
eb251d17bd23b0bfd35c6a08b43c3d98f94ea4fe
# Dataset Card for "mm_reviews" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PierreVie/mm_reviews
[ "region:us" ]
2023-06-06T12:14:53+00:00
{"dataset_info": {"features": [{"name": "message_customer", "dtype": "string"}, {"name": "Description technique/conformit\u00e9/montage", "dtype": "string"}, {"name": "Livraison (vitesse, probl\u00e8me)", "dtype": "string"}, {"name": "Etat du produit, probl\u00e8me de fonctionnement", "dtype": "float64"}, {"name": "Qualit\u00e9 / Prix / Rapport qualit\u00e9 prix", "dtype": "string"}, {"name": "Aspect produit", "dtype": "string"}, {"name": "Avis g\u00e9n\u00e9ral ou service ManoMano / SAV", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 27108.294392523363, "num_examples": 171}, {"name": "test", "num_bytes": 6816.705607476635, "num_examples": 43}], "download_size": 29295, "dataset_size": 33925.0}}
2023-06-06T12:14:57+00:00
fbc156d41e4d15ead5b06e5d7f7c8c9121c53f59
# Dataset Card for "hotpotExtendedAnoLM_rechunked" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ghomasHudson/hotpotExtendedAnoLM_rechunked
[ "region:us" ]
2023-06-06T12:22:04+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1179895402, "num_examples": 672050}, {"name": "validation", "num_bytes": 106554129, "num_examples": 57844}], "download_size": 674308492, "dataset_size": 1286449531}}
2023-06-06T12:38:14+00:00
a2b5becd8feed6bc2c585798e7c49dc586a9af93
rxsmzfg/1
[ "license:openrail", "region:us" ]
2023-06-06T12:27:15+00:00
{"license": "openrail"}
2023-06-06T12:28:58+00:00
8b3d3c7a3e875aaec13becb0020ecf410c5e54bb
# Dataset Card for "divi7007" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
divi7007/divi7007
[ "region:us" ]
2023-06-06T12:33:26+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1966464, "num_examples": 48}], "download_size": 0, "dataset_size": 1966464}}
2023-06-12T08:26:32+00:00
954b3416a154b664ecf954273a164339ae5dadfb
# Wikipedia Turkish Summarization Dataset ## Dataset Description This is a Turkish summarization dataset 🇹🇷 prepared from the 2023 Wikipedia dump. The dataset has been cleaned, tokenized, and summarized using Huggingface Wikipedia dataset cleaner script, custom cleaning scripts, and OpenAI's gpt3.5-turbo API. ### Data Source - Wikipedia's latest Turkish dump (2023 version) 🌐 ### Features - text: string (The original text extracted from Wikipedia articles 📖) - summary: string (The generated summary of the original text 📝) ### Data Splits | Split | Num Bytes | Num Examples | |------------|--------------------|--------------| | train | 324,460,408.048 | 119,110 | | validation | 17,077,006.952 | 6,269 | ### Download Size - 216,029,002 bytes ### Dataset Size - 341,537,415 bytes ## Data Preparation ### Data Collection 1. The latest Turkish Wikipedia dump was downloaded 📥. 2. Huggingface Wikipedia dataset cleaner script was used to clean the text 🧹. 3. A custom script was used to further clean the text, removing sections like "Kaynakca" (References) and other irrelevant information 🛠️. ### Tokenization The dataset was tokenized using Google's MT5 tokenizer. The following criteria were applied: - Articles with a token count between 300 and 900 were selected ✔️. - Articles with less than 300 tokens were ignored ❌. - For articles with more than 900 tokens, only the first 900 tokens ending with a paragraph were selected 🔍. ### Summarization The generated raw texts were summarized using OpenAI's gpt3.5-turbo API 🤖. ## Dataset Usage This dataset can be used for various natural language processing tasks 👩‍💻, such as text summarization, machine translation, and language modeling in the Turkish language. Example usage: ```python from datasets import load_dataset # Load the dataset dataset = load_dataset("musabg/wikipedia-tr-summarization") # Access the data train_data = dataset["train"] validation_data = dataset["validation"] # Iterate through the data for example in train_data: text = example["text"] summary = example["summary"] # Process the data as needed ``` Please make sure to cite the dataset as follows 📝: ```bibtex @misc{musabg2023wikipediatrsummarization, author = {Musab Gultekin}, title = {Wikipedia Turkish Summarization Dataset}, year = {2023}, publisher = {HuggingFace}, howpublished = {\url{https://huggingface.co/datasets/musabg/wikipedia-tr-summarization}}, } ``` --- ## Wikipedia Türkçe Özetleme Veri Seti Bu, 2023 Wikipedia dökümünden hazırlanan Türkçe özetleme veri kümesidir. Veri kümesi, Huggingface Wikipedia veri kümesi temizleme betiği, özel temizleme betikleri ve OpenAI'nin gpt3.5-turbo API'si kullanılarak temizlenmiş, tokenleştirilmiş ve özetlenmiştir. ### Veri Kaynağı - Wikipedia'nın en güncel Türkçe dökümü (2023 sürümü) ### Özellikler - text: string (Wikipedia makalelerinden çıkarılan orijinal metin) - summary: string (Orijinal metnin oluşturulan özeti) ### Veri Bölümleri | Bölüm | Numara Baytı | Örnek Sayısı | |------------|--------------------|--------------| | train | 324.460.408,048 | 119.110 | | validation | 17.077.006,952 | 6.269 | ### İndirme Boyutu - 216.029.002 bayt ### Veri Kümesi Boyutu - 341.537.415 bayt ## Veri Hazırlama ### Veri Toplama 1. En güncel Türkçe Wikipedia dökümü indirildi. 2. Huggingface Wikipedia veri kümesi temizleme betiği metni temizlemek için kullanıldı. 3. "Kaynakça" (Referanslar) gibi bölümleri ve diğer alakasız bilgileri kaldırmak için özel bir betik kullanıldı. ### Tokenleştirme Veri kümesi, Google'ın MT5 tokenleştiricisi kullanılarak tokenleştirildi. 
Aşağıdaki kriterler uygulandı: - 300 ile 900 token arasında olan makaleler seçildi. - 300'den az tokeni olan makaleler dikkate alınmadı. - 900'den fazla tokeni olan makalelerde, sadece bir paragraf ile biten ilk 900 token kısmı alındı. ### Özetleme Oluşturulan ham metinler, OpenAI'nin gpt3.5-turbo API'si kullanılarak özetlendi. ## Veri Kümesi Kullanımı Bu veri kümesi, Türkçe dilinde metin özetleme, makine çevirisi ve dil modelleme gibi çeşitli doğal dil işleme görevleri için kullanılabilir. Örnek kullanım: ```python from datasets import load_dataset # Veri kümesini yükle dataset = load_dataset("musabg/wikipedia-tr-summarization") # Verilere erişin train_data = dataset["train"] validation_data = dataset["validation"] # Verilerin üzerinden geçin for example in train_data: text = example["text"] summary = example["summary"] # Veriyi gerektiği gibi işleyin ```
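A sketch of the 300–900 token selection rule from the data-preparation section above, using the MT5 tokenizer via `transformers`; the paragraph-boundary truncation is simplified here, and the exact script used is not published:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

def select_text(text: str):
    """Apply the 300-900 token selection rule (simplified sketch)."""
    n_tokens = len(tokenizer(text)["input_ids"])
    if n_tokens < 300:
        return None  # articles under 300 tokens are ignored
    if n_tokens <= 900:
        return text  # articles in range are kept as-is
    # Over 900 tokens: keep the longest prefix of whole paragraphs
    # that stays within the first 900 tokens (simplified).
    kept = ""
    for paragraph in text.split("\n"):
        candidate = (kept + "\n" + paragraph).strip()
        if len(tokenizer(candidate)["input_ids"]) > 900:
            break
        kept = candidate
    return kept or None
```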
musabg/wikipedia-tr-summarization
[ "task_categories:summarization", "size_categories:100K<n<1M", "language:tr", "region:us" ]
2023-06-06T12:39:57+00:00
{"language": ["tr"], "size_categories": ["100K<n<1M"], "task_categories": ["summarization"], "pretty_name": "Wikipedia Turkish Summarization", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 324460408.0479985, "num_examples": 119110}, {"name": "validation", "num_bytes": 17077006.95200153, "num_examples": 6269}], "download_size": 216029002, "dataset_size": 341537415}}
2023-06-13T03:29:02+00:00
c189929d585910d8ca42de3f90845534ccae41ba
# Instruct HumanEval ## Summary InstructHumanEval is a modified version of OpenAI HumanEval. For a given prompt, we extracted its signature, its docstring as well as its header to create a flexible setting which allows evaluating instruction-tuned LLMs. The delimiters used in the instruction-tuning procedure can be used to build an instruction that allows the model to elicit its best capabilities. Here is an example of use. The prompt can be built as follows, depending on the model's instruction-tuning delimiters ```python from datasets import load_dataset ds = load_dataset("codeparrot/instructhumaneval", split="test", use_auth_token=True) prompt_0 = "Human: " + ds[0]["instruction"] + "\nAssistant: " + ds[0]["context"] print(prompt_0) ``` Output ``` Human: Write a function has_close_elements(numbers: List[float], threshold: float) -> bool to solve the following problem: Check if in given list of numbers, are any two numbers closer to each other than given threshold. >>> has_close_elements([1.0, 2.0, 3.0], 0.5) False >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) True Assistant: from typing import List def has_close_elements(numbers: List[float], threshold: float) -> bool: ``` The model can therefore complete the instruction and yield better results because it fits its training procedure. You can also find the code to evaluate models on the dataset in the [BigCode-evaluation-harness](https://github.com/bigcode-project/bigcode-evaluation-harness/tree/main). The following sections provide more details on the dataset. ## Dataset description This dataset is a modified version of [OpenAI HumanEval](https://huggingface.co/datasets/openai_humaneval) that is designed to adapt the benchmark to instruction fine-tuned models. As a matter of fact, HumanEval evaluates the ability to complete code given its signature, its docstring and potentially some auxiliary functions. ## Dataset construction In order to build an instruction version of HumanEval we extracted relevant information from the **prompt** column of the original version: - **signature**: this is the signature of the function to complete. It looks like `def function_name(args:type):-> return_type`. - **docstring**: this is the docstring of the function. It is the text which describes the purpose of the function. - **context**: this represents all additional information that is provided in order to help the model complete the function. It includes the imports and the auxiliary functions. Our idea was to move from the original format of HumanEval ``` <context> <signature> <docstring> ``` And build an **instruction** that would be ``` Write a function <signature> to solve the following problem: <docstring> ``` From this instruction, we can design an evaluation pipeline for instruction fine-tuned language models. ## Evaluation Instruction fine-tuned LLMs are built by fine-tuning a base LLM on an instruction dataset. This instruction dataset contains several <input, output> pairs where each pair represents an instruction submitted by a user together with the right answer to it. These pairs are framed into a multi-turn conversation with the help of special tokens which designate each member of the interaction, e.g. a user_token `Human:`, an assistant_token `Assistant:` and an end_token `\n` that designates the end of each turn.
### Code completion In this case, the LLM is provided with the following prompt ``` user_token + <instruction> + <end_token> + <assistant_token> + <context> ``` It is then expected to complete the function to solve the problem formulated by the `instruction`. This is very similar to the original evaluation, with the advantage that it puts the model in the best condition to understand the task that it is asked to solve. The evaluation is done on the part generated after `<assistant_token>`. ### Docstring to code This setting is more complicated, as it requires the model to account for the information contained in the instruction, such as the function signature. The LLM is provided with the following prompt ``` user_token + <instruction> + <end_token> + <assistant_token> ``` The model has to generate a function with the correct signature that adequately solves the problem. The evaluation is done by identifying the content of the function in the generation (by searching for the right `entry_point`/`function_name`) and concatenating it with the `<context>` provided. ## How to use the dataset ```python from datasets import load_dataset ds = load_dataset("codeparrot/instructhumaneval") ``` ``` ds DatasetDict({ test: Dataset({ features: ['task_id', 'prompt', 'canonical_solution', 'test', 'entry_point', 'signature', 'docstring', 'context', 'instruction'], num_rows: 164 }) }) ```
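A simplified sketch of the extraction step described in the docstring-to-code setting above; the official implementation lives in the BigCode evaluation harness, so treat this as illustrative:

```python
def extract_completion(generation: str, entry_point: str, context: str) -> str:
    """Find the generated function by its entry point and concatenate it
    with the provided context (simplified sketch of the evaluation step)."""
    marker = f"def {entry_point}("
    start = generation.find(marker)
    if start == -1:
        return context  # the model produced no matching function
    return context + "\n" + generation[start:]
```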
codeparrot/instructhumaneval
[ "region:us" ]
2023-06-06T12:52:48+00:00
{"dataset_info": {"features": [{"name": "task_id", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "canonical_solution", "dtype": "string"}, {"name": "test", "dtype": "string"}, {"name": "entry_point", "dtype": "string"}, {"name": "signature", "dtype": "string"}, {"name": "docstring", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "instruction", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 335913, "num_examples": 164}], "download_size": 161076, "dataset_size": 335913}}
2023-06-13T14:58:34+00:00
5b9475800c4986900d9f2afb14ce85e3d7e79b64
## GitHub R repositories dataset R source files from GitHub. This dataset has been created using the public GitHub datasets from Google BigQuery. These are the actual queries that have been used to export the data: ``` EXPORT DATA OPTIONS ( uri = 'gs://your-bucket/gh-r/*.parquet', format = 'PARQUET') as ( select f.id, f.repo_name, f.path, c.content, c.size from ( SELECT distinct id, repo_name, path FROM `bigquery-public-data.github_repos.files` where ends_with(path, ".R") ) as f left join `bigquery-public-data.github_repos.contents` as c on f.id = c.id ); EXPORT DATA OPTIONS ( uri = 'gs://your-bucket/licenses.parquet', format = 'PARQUET') as (select * from `bigquery-public-data.github_repos.licenses`) ``` Files were then exported and processed locally with the files in the root of this repository. Datasets in this repository contain data from repositories with different licenses. The data schema is: ``` id: string repo_name: string path: string content: string size: int32 license: string ``` Last updated: Jun 6th 2023
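A minimal loading sketch for the schema above (the split name `train` is an assumption):

```python
from datasets import load_dataset

# Load the dataset and inspect one R source file.
ds = load_dataset("dfalbel/github-r-repos", split="train")
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["content"][:200])
```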
dfalbel/github-r-repos
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:code", "license:other", "region:us" ]
2023-06-06T13:01:29+00:00
{"language": ["code"], "license": "other", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "github-r-repos"}
2023-07-11T10:39:18+00:00
626edd362212f58b84ef7dc94f0a9d3557cd8889
# Crossmodal-3600: A Massively Multilingual Multimodal Evaluation Dataset ## Abstract Research in massively multilingual image captioning has been severely hampered by a lack of high-quality evaluation datasets. In this paper we present the Crossmodal-3600 dataset (XM3600 in short), a geographically-diverse set of 3600 images annotated with human-generated reference captions in 36 languages. The images were selected from across the world, covering regions where the 36 languages are spoken, and annotated with captions that achieve consistency in terms of style across all languages, while avoiding annotation artifacts due to direct translation. We apply this benchmark to model selection for massively multilingual image captioning models, and show strong correlation results with human evaluations when using XM3600 as golden references for automatic metrics. [Original source](https://google.github.io/crossmodal-3600/)
dinhanhx/crossmodal-3600
[ "task_categories:image-to-text", "task_ids:image-captioning", "source_datasets:wikipedia", "source_datasets:google", "language:ar", "language:bn", "language:cs", "language:da", "language:de", "language:el", "language:en", "language:es", "language:fa", "language:fi", "language:fil", "language:fr", "language:hi", "language:hr", "language:hu", "language:id", "language:it", "language:he", "language:ja", "language:ko", "language:mi", "language:nl", "language:no", "language:pl", "language:pt", "language:quz", "language:ro", "language:ru", "language:sv", "language:sw", "language:te", "language:th", "language:tr", "language:uk", "language:vi", "language:zh", "license:other", "crossmodal-3600", "region:us" ]
2023-06-06T13:07:54+00:00
{"language": ["ar", "bn", "cs", "da", "de", "el", "en", "es", "fa", "fi", "fil", "fr", "hi", "hr", "hu", "id", "it", "he", "ja", "ko", "mi", "nl", false, "pl", "pt", "quz", "ro", "ru", "sv", "sw", "te", "th", "tr", "uk", "vi", "zh"], "license": "other", "source_datasets": ["wikipedia", "google"], "task_categories": ["image-to-text"], "task_ids": ["image-captioning"], "pretty_name": "Crossmodal-3600: A Massively Multilingual Multimodal Evaluation Dataset", "tags": ["crossmodal-3600"]}
2023-06-06T13:38:51+00:00
29d3ca7ef8b871684cdd9df3a16b32044bd43fcc
# Dataset Card for "unsplash_5k_canny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
wtcherr/unsplash_5k_canny
[ "region:us" ]
2023-06-06T13:14:57+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "guide", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 857588026.0, "num_examples": 5000}], "download_size": 856975356, "dataset_size": 857588026.0}}
2023-06-06T13:15:39+00:00
3766fbf95f7f3a05516159f3c8c67d887ac4ef97
dop13/ss
[ "region:us" ]
2023-06-06T13:30:13+00:00
{}
2023-06-06T13:32:14+00:00
7cd48801bbe0d8fa7234d325b02329b484aa6751
# Dataset Card for "tarteel-ai-quran-tafsir" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Salama1429/tarteel-ai-quran-tafsir
[ "region:us" ]
2023-06-06T13:41:29+00:00
{"dataset_info": {"features": [{"name": "en-ahmedali", "dtype": "string"}, {"name": "en-ahmedraza", "dtype": "string"}, {"name": "en-arberry", "dtype": "string"}, {"name": "en-asad", "dtype": "string"}, {"name": "en-daryabadi", "dtype": "string"}, {"name": "en-hilali", "dtype": "string"}, {"name": "en-itani", "dtype": "string"}, {"name": "en-maududi", "dtype": "string"}, {"name": "en-mubarakpuri", "dtype": "string"}, {"name": "en-pickthall", "dtype": "string"}, {"name": "en-qarai", "dtype": "string"}, {"name": "en-qaribullah", "dtype": "string"}, {"name": "en-sahih", "dtype": "string"}, {"name": "en-sarwar", "dtype": "string"}, {"name": "en-shakir", "dtype": "string"}, {"name": "en-transliterati", "dtype": "string"}, {"name": "en-wahiduddi", "dtype": "string"}, {"name": "en-yusufali", "dtype": "string"}, {"name": "surah", "dtype": "int64"}, {"name": "ayah", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 16266291, "num_examples": 6236}], "download_size": 9040639, "dataset_size": 16266291}}
2023-06-06T13:41:32+00:00
c426a819bb48cfedf61ba0fa297047fe7ba5ff9e
# Dataset Card for the Qur'anic Reading Comprehension Dataset (QRCD) ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://sites.google.com/view/quran-qa-2022/home - **Repository:** https://gitlab.com/bigirqu/quranqa/-/tree/main/ - **Paper:** https://dl.acm.org/doi/10.1145/3400396 - **Leaderboard:** - **Point of Contact:** @piraka9011 ### Dataset Summary The QRCD (Qur'anic Reading Comprehension Dataset) is composed of 1,093 tuples of question-passage pairs that are coupled with their extracted answers to constitute 1,337 question-passage-answer triplets. ### Supported Tasks and Leaderboards This task is evaluated as a ranking task. To give credit to a QA system that may retrieve an answer (not necessarily at the first rank) that does not fully match one of the gold answers but partially matches it, we use partial Reciprocal Rank (pRR) measure. It is a variant of the traditional Reciprocal Rank evaluation metric that considers partial matching. pRR is the official evaluation measure of this shared task. We will also report Exact Match (EM) and F1@1, which are evaluation metrics applied only on the top predicted answer. The EM metric is a binary measure that rewards a system only if the top predicted answer exactly matches one of the gold answers. Whereas, the F1@1 metric measures the token overlap between the top predicted answer and the best matching gold answer. To get an overall evaluation score, each of the above measures is averaged over all questions. ### Languages Qur'anic Arabic ## Dataset Structure ### Data Instances To simplify the structure of the dataset, each tuple contains one passage, one question and a list that may contain one or more answers to that question, as shown below: ```json { "pq_id": "38:41-44_105", "passage": "واذكر عبدنا أيوب إذ نادى ربه أني مسني الشيطان بنصب وعذاب. اركض برجلك هذا مغتسل بارد وشراب. ووهبنا له أهله ومثلهم معهم رحمة منا وذكرى لأولي الألباب. وخذ بيدك ضغثا فاضرب به ولا تحنث إنا وجدناه صابرا نعم العبد إنه أواب.", "surah": 38, "verses": "41-44", "question": "من هو النبي المعروف بالصبر؟", "answers": [ { "text": "أيوب", "start_char": 12 } ] } ``` Each Qur’anic passage in QRCD may have more than one occurrence; and each passage occurrence is paired with a different question. Likewise, each question in QRCD may have more than one occurrence; and each question occurrence is paired with a different Qur’anic passage. 
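Given the instance format above, a quick sanity check that each `start_char` offset actually points at its answer text (a sketch over the JSON structure shown):

```python
def check_answers(example: dict) -> bool:
    """Verify that every answer span matches the passage at start_char."""
    passage = example["passage"]
    return all(
        passage[a["start_char"] : a["start_char"] + len(a["text"])] == a["text"]
        for a in example["answers"]
    )

# For the sample instance shown above, check_answers(...) returns True:
# the answer "أيوب" starts at character offset 12 of the passage.
```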
The source of the Qur'anic text in QRCD is the Tanzil project download page, which provides verified versions of the Holy Qur'an in several scripting styles. We have chosen the simple-clean text style of Tanzil version 1.0.2. ### Data Fields * `pq_id`: Sample ID * `passage`: Context text * `surah`: Surah number * `verses`: Verse range * `question`: Question text * `answers`: List of answers and their start character ### Data Splits | **Dataset** | **%** | **# Question-Passage Pairs** | **# Question-Passage-Answer Triplets** | |-------------|:-----:|:-----------------------------:|:---------------------------------------:| | Training | 65% | 710 | 861 | | Development | 10% | 109 | 128 | | Test | 25% | 274 | 348 | | All | 100% | 1,093 | 1,337 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information The QRCD v1.1 dataset is distributed under the CC-BY-ND 4.0 License https://creativecommons.org/licenses/by-nd/4.0/legalcode For a human-readable summary of (and not a substitute for) the above CC-BY-ND 4.0 License, please refer to https://creativecommons.org/licenses/by-nd/4.0/ ### Citation Information ``` @article{malhas2020ayatec, author = {Malhas, Rana and Elsayed, Tamer}, title = {AyaTEC: Building a Reusable Verse-Based Test Collection for Arabic Question Answering on the Holy Qur’an}, year = {2020}, issue_date = {November 2020}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {19}, number = {6}, issn = {2375-4699}, url = {https://doi.org/10.1145/3400396}, doi = {10.1145/3400396}, journal = {ACM Trans. Asian Low-Resour. Lang. Inf. Process.}, month = {oct}, articleno = {78}, numpages = {21}, keywords = {evaluation, Classical Arabic} } ``` ### Contributions Thanks to [@piraka9011](https://github.com/piraka9011) for adding this dataset.
Salama1429/tarteel-ai-QuranQA
[ "task_categories:question-answering", "task_ids:extractive-qa", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:n<1K", "size_categories:1K<n<10K", "source_datasets:original", "language:ar", "license:cc-by-nd-4.0", "quran", "qa", "region:us" ]
2023-06-06T13:41:43+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["ar"], "license": ["cc-by-nd-4.0"], "multilinguality": ["monolingual"], "size_categories": ["n<1K", "1K<n<10K"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["extractive-qa"], "pretty_name": "Qur'anic Reading Comprehension Dataset", "tags": ["quran", "qa"]}
2023-06-06T13:46:54+00:00
8b90ba2e27db6b412fe14c8796a3c7156f8cf9ad
# Dataset Card for "github-issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mindcraft-nl/github-issues
[ "region:us" ]
2023-06-06T13:45:37+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "assignees", "list": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "milestone", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "description", "dtype": 
"string"}, {"name": "creator", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "open_issues", "dtype": "int64"}, {"name": "closed_issues", "dtype": "int64"}, {"name": "state", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "due_on", "dtype": "null"}, {"name": "closed_at", "dtype": "null"}]}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "closed_at", "dtype": "timestamp[s]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "null"}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 16600004, "num_examples": 3000}], "download_size": 4692861, "dataset_size": 16600004}}
2023-06-06T13:45:39+00:00
b87b5e8aa3d85cd1846eb0673f57c4314ecca4bd
# Dataset Card for "MedQuad-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Amirkid/MedQuad-dataset
[ "region:us" ]
2023-06-06T14:08:42+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21658852, "num_examples": 32800}], "download_size": 8756796, "dataset_size": 21658852}}
2023-06-06T14:08:50+00:00
5e44806f34431045687e6a7cc118fb1465494307
maneshkarun/nlp
[ "license:mit", "region:us" ]
2023-06-06T14:13:59+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "hyperpartisan", "dtype": "bool"}, {"name": "url", "dtype": "string"}, {"name": "published_at", "dtype": "string"}, {"name": "bias", "dtype": {"class_label": {"names": {"0": "right", "1": "right-center", "2": "least", "3": "left-center", "4": "left"}}}}, {"name": "word_count", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2810510649.0, "num_examples": 600000}], "download_size": 1569261205, "dataset_size": 2810510649.0}}
2023-06-06T16:53:54+00:00
d6fb74636e36092d2e782b31131e223807bfb168
# Dataset Card for "test2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eliaz/test2
[ "region:us" ]
2023-06-06T14:18:30+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 92101495.91574256, "num_examples": 45954}, {"name": "test", "num_bytes": 23026376.08425744, "num_examples": 11489}], "download_size": 64212837, "dataset_size": 115127872.0}}
2023-06-06T14:23:01+00:00
6a76065d61bfc0f5fc977edb74c22b32bf3deed5
# Dataset Card for "1UID-11VARS-10MUTATIONS" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Amirkid/1UID-11VARS-10MUTATIONS
[ "region:us" ]
2023-06-06T14:19:30+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 65860080, "num_examples": 108680}], "download_size": 448869, "dataset_size": 65860080}}
2023-06-06T14:19:34+00:00
f456a885d00aa7da1f65d7cef230b288b4815f7b
henrydz/Train
[ "license:apache-2.0", "region:us" ]
2023-06-06T14:23:18+00:00
{"license": "apache-2.0"}
2023-06-06T14:23:18+00:00
0219b66a52e6c49eed5c2f27b3fa9cba092b6429
Follow me on: https://facebook.com/zaanind | https://github.com/zaanind Contact: [email protected] | https://m.me/zaanind | https://t.me/zaanind Dataset Name: Eng-Sinhala Translation Dataset Description: This dataset contains approximately 80,000 lines of English-Sinhala translation pairs. It can be used to train models for machine translation tasks and other natural language processing applications. Data License: GPL (GNU General Public License). Please ensure that you comply with the terms and conditions of the GPL when using the dataset. Note: Because of the dataset's large size, some sentences may be incorrect. It is important to ensure the quality and accuracy of the data for training purposes; consider performing data cleaning and validation to improve the reliability of your model. Mission: Our mission is to improve the quality of open-source English to Sinhala machine translation. This dataset, consisting of roughly 80,000 translation pairs, is a step in that direction. Special Thanks: We extend our gratitude to the Zoom.lk subtitles team, who collected and cleaned the data; their contributions have been invaluable in making this dataset possible. Please feel free to reach out if you have any questions, suggestions, or would like to collaborate on further improving this dataset or machine translation models. Your support is greatly appreciated! (Contact: [email protected] | https://m.me/zaanind | https://t.me/zaanind)
zaanind/sinhala_englsih_parrel_corpus
[ "task_categories:translation", "size_categories:10K<n<100K", "language:si", "language:en", "license:gpl", "region:us" ]
2023-06-06T14:27:51+00:00
{"language": ["si", "en"], "license": "gpl", "size_categories": ["10K<n<100K"], "task_categories": ["translation"], "pretty_name": "Zoom Eng-Si Nmt Dataset", "dataset_info": {"features": [{"name": "english", "dtype": "string"}, {"name": "sinhala", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8516909, "num_examples": 80684}], "download_size": 4162589, "dataset_size": 8516909}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-14T11:51:28+00:00
9e21b0f3c36bd7ce50b741ce966297a7f73cae38
# Hacker News Stories Dataset This is a dataset containing approximately 4 million stories from Hacker News, exported to a CSV file. The dataset includes the following fields: - `id` (int64): The unique identifier of the story. - `title` (string): The title of the story. - `url` (string): The URL of the story. - `score` (int64): The score of the story. - `time` (int64): The time the story was posted, in Unix time. - `comments` (int64): The number of comments on the story. - `author` (string): The username of the person who posted the story. ## Accessing the Dataset The dataset can be accessed through [Hugging Face Datasets](https://huggingface.co/datasets/julien040/hacker-news-posts). You can download the dataset in CSV format or use the Hugging Face Datasets library to load the dataset directly in your Python code. ## License The dataset is made available under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/). ## Disclaimer The dataset is provided as is, without warranty of any kind, express or implied. The owner of the dataset makes no representations or warranties, express or implied, regarding the dataset or its use. The owner of the dataset will not be liable for any damages arising out of or in connection with the use of the dataset. ## Updates The dataset will be updated regularly to include new stories from Hacker News.
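A minimal sketch of loading the dataset with the fields listed above and filtering for high-scoring stories (the split name `train` is an assumption):

```python
from datasets import load_dataset

ds = load_dataset("julien040/hacker-news-posts", split="train")

# Keep stories with a score of at least 100.
popular = ds.filter(lambda row: row["score"] >= 100)
print(popular[0]["title"], popular[0]["url"])
```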
julien040/hacker-news-posts
[ "size_categories:1M<n<10M", "source_datasets:Hacker News", "language:en", "license:cc-by-nc-sa-4.0", "hacker news", "region:us" ]
2023-06-06T15:19:47+00:00
{"language": ["en"], "license": "cc-by-nc-sa-4.0", "size_categories": ["1M<n<10M"], "source_datasets": ["Hacker News"], "pretty_name": "Hacker News stories dataset", "tags": ["hacker news"]}
2023-06-06T16:04:37+00:00
b5aa07f0c7792d15340203658ad722d1280ff3c4
# Dataset Card for "sst2_tiny_subset_32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rungalileo/sst2_tiny_subset_32
[ "region:us" ]
2023-06-06T15:23:44+00:00
{"dataset_info": {"features": [{"name": "idx", "dtype": "int32"}, {"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "positive"}}}}], "splits": [{"name": "train", "num_bytes": 2433, "num_examples": 32}, {"name": "validation", "num_bytes": 2433, "num_examples": 32}, {"name": "test", "num_bytes": 2433, "num_examples": 32}], "download_size": 11709, "dataset_size": 7299}}
2023-06-06T19:53:56+00:00
e46cb2f77bae5296b8755b8f045dd7aa19a2a64e
cmseibold/PAX-RayPlusPlus
[ "license:cc-by-nc-sa-2.0", "region:us" ]
2023-06-06T15:34:22+00:00
{"license": "cc-by-nc-sa-2.0"}
2023-06-06T15:59:49+00:00
b163731d32b312ec4a8336e791107ff72c770a30
Includes several common time-series forecasting datasets: * ETTsmall - ETTh1 - ETTh2 - ETTm1 - ETTm2 * traffic * electricity * illness * exchange_rate
ym0v0my/Time_series_dataset
[ "task_categories:time-series-forecasting", "region:us" ]
2023-06-06T15:51:28+00:00
{"task_categories": ["time-series-forecasting"]}
2023-06-06T15:58:09+00:00
91200ea7615adfc6659250b429505e4a0b2386ef
Fredithefish/hh-rlhf-RedPajama-Chat-Format
[ "license:apache-2.0", "region:us" ]
2023-06-06T15:58:06+00:00
{"license": "apache-2.0"}
2023-06-06T15:59:11+00:00
30f5306540f0a98880ade230109d0ca0ec6b5f57
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/scifact-pl-qrels
[ "task_categories:sentence-similarity", "language:pl", "license:cc-by-sa-4.0", "arxiv:2305.19840", "region:us" ]
2023-06-06T16:09:44+00:00
{"language": ["pl"], "license": "cc-by-sa-4.0", "task_categories": ["sentence-similarity"]}
2023-06-07T07:25:00+00:00
1e7ecaa043a5bf4739a27d96f78b1a5fa2282057
# Dataset Card for "guanaco-english" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AlekseyKorshuk/guanaco-english
[ "region:us" ]
2023-06-06T16:12:18+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "lang", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 119912138, "num_examples": 216541}], "download_size": 70267733, "dataset_size": 119912138}}
2023-06-06T16:12:22+00:00
124c9350af8ade06bfa24c4b567018035206f96d
# Dataset Card for "sharegpt_reward_modeling_pairwise" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andersonbcdefg/sharegpt_reward_modeling_pairwise
[ "region:us" ]
2023-06-06T16:13:30+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response_a", "dtype": "string"}, {"name": "response_b", "dtype": "string"}, {"name": "explanation", "dtype": "string"}, {"name": "preferred", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 21374880, "num_examples": 11841}], "download_size": 11595826, "dataset_size": 21374880}}
2023-06-06T16:13:44+00:00
f171245712cf85dd4700b06bef18001578d0ca8d
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/nq-pl
[ "language:pl", "license:cc-by-4.0", "arxiv:2305.19840", "region:us" ]
2023-06-06T16:13:34+00:00
{"language": ["pl"], "license": "cc-by-4.0"}
2023-09-07T11:25:03+00:00
cab80d6df019cbe5785298a0c890e64eacd1d030
# Dataset Card for "sharegpt_reward_modeling_pairwise_no_as_an_ai" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andersonbcdefg/sharegpt_reward_modeling_pairwise_no_as_an_ai
[ "region:us" ]
2023-06-06T16:13:44+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response_a", "dtype": "string"}, {"name": "response_b", "dtype": "string"}, {"name": "explanation", "dtype": "string"}, {"name": "preferred", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 21367130, "num_examples": 11841}], "download_size": 11592587, "dataset_size": 21367130}}
2023-06-06T16:13:58+00:00
5c9b2f6eeebafd6adaf88124fbf590d97e6bb90a
# Dataset Card for "guanaco-english-chatml" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AlekseyKorshuk/guanaco-english-chatml
[ "region:us" ]
2023-06-06T16:18:50+00:00
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 126814007, "num_examples": 216541}], "download_size": 66350200, "dataset_size": 126814007}}
2023-06-06T16:20:35+00:00
ddefabf3934547e1e9ef807df4b6130d56e7f3cf
# Dataset Card for "21116b08" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
results-sd-v1-5-sd-v2-1-if-v1-0-karlo/21116b08
[ "region:us" ]
2023-06-06T16:27:56+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 174, "num_examples": 10}], "download_size": 1324, "dataset_size": 174}}
2023-06-06T16:27:57+00:00
b1bb4cd5e9f35e4c4ab4f9f7b6c61f110b7829b1
# Dataset Card for "the-stack-vault-smol" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xlangai/the-stack-vault-smol
[ "region:us" ]
2023-06-06T16:30:56+00:00
{"dataset_info": {"features": [{"name": "hexsha", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "ext", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "max_stars_repo_path", "dtype": "string"}, {"name": "max_stars_repo_name", "dtype": "string"}, {"name": "max_stars_repo_head_hexsha", "dtype": "string"}, {"name": "max_stars_repo_licenses", "sequence": "string"}, {"name": "max_stars_count", "dtype": "float64"}, {"name": "max_stars_repo_stars_event_min_datetime", "dtype": "string"}, {"name": "max_stars_repo_stars_event_max_datetime", "dtype": "string"}, {"name": "max_issues_repo_path", "dtype": "string"}, {"name": "max_issues_repo_name", "dtype": "string"}, {"name": "max_issues_repo_head_hexsha", "dtype": "string"}, {"name": "max_issues_repo_licenses", "sequence": "string"}, {"name": "max_issues_count", "dtype": "float64"}, {"name": "max_issues_repo_issues_event_min_datetime", "dtype": "string"}, {"name": "max_issues_repo_issues_event_max_datetime", "dtype": "string"}, {"name": "max_forks_repo_path", "dtype": "string"}, {"name": "max_forks_repo_name", "dtype": "string"}, {"name": "max_forks_repo_head_hexsha", "dtype": "string"}, {"name": "max_forks_repo_licenses", "sequence": "string"}, {"name": "max_forks_count", "dtype": "float64"}, {"name": "max_forks_repo_forks_event_min_datetime", "dtype": "string"}, {"name": "max_forks_repo_forks_event_max_datetime", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "avg_line_length", "dtype": "float64"}, {"name": "max_line_length", "dtype": "int64"}, {"name": "alphanum_fraction", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 2274352.96, "num_examples": 265}], "download_size": 1001690, "dataset_size": 2274352.96}}
2023-06-06T16:30:57+00:00
116b7de47c571dea7a215f8ce97def8668007968
NekoMirra/Miya
[ "license:afl-3.0", "region:us" ]
2023-06-06T16:39:32+00:00
{"license": "afl-3.0"}
2023-06-06T16:39:32+00:00
701447de20b82bfe2d3f67566dc30cc12c08ce18
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/nq-pl-qrels
[ "language:pl", "license:cc-by-sa-4.0", "arxiv:2305.19840", "region:us" ]
2023-06-06T16:45:32+00:00
{"language": ["pl"], "license": "cc-by-sa-4.0"}
2023-06-07T07:23:58+00:00
2e535829717f8bf9dc829b7f911cc5bbd4e6608e
Part of **BEIR-PL: Zero Shot Information Retrieval Benchmark for the Polish Language**. Link to arxiv: https://arxiv.org/pdf/2305.19840.pdf Contact: [email protected]
clarin-knext/fiqa-pl
[ "language:pl", "arxiv:2305.19840", "region:us" ]
2023-06-06T16:48:25+00:00
{"language": ["pl"]}
2023-06-07T07:23:07+00:00
caf3a547739baecd9ea44e89c3232773f631b3fb
maneshkarun/median-3000
[ "license:mit", "region:us" ]
2023-06-06T16:58:54+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "hyperpartisan", "dtype": "bool"}, {"name": "url", "dtype": "string"}, {"name": "published_at", "dtype": "string"}, {"name": "bias", "dtype": {"class_label": {"names": {"0": "right", "1": "right-center", "2": "least", "3": "left-center", "4": "left"}}}}, {"name": "word_count", "dtype": "int64"}, {"name": "cleaned_data", "dtype": "string"}, {"name": "pos_tagged", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36155808.0, "num_examples": 500}], "download_size": 17330684, "dataset_size": 36155808.0}}
2023-06-08T06:12:15+00:00
175bb865a6e6185fbdd7beadf0c8bb258609b192
ChristophSchuhmann/Chess-Selfplay2
[ "license:apache-2.0", "region:us" ]
2023-06-06T17:07:44+00:00
{"license": "apache-2.0"}
2023-08-12T06:13:17+00:00
9803dd55222f16132de52e1c34d3872959be3f8a
# Dataset Card for "the-stack-vault" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xlangai/the-stack-vault
[ "region:us" ]
2023-06-06T17:13:24+00:00
{"dataset_info": {"features": [{"name": "hexsha", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "ext", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "max_stars_repo_path", "dtype": "string"}, {"name": "max_stars_repo_name", "dtype": "string"}, {"name": "max_stars_repo_head_hexsha", "dtype": "string"}, {"name": "max_stars_repo_licenses", "sequence": "string"}, {"name": "max_stars_count", "dtype": "int64"}, {"name": "max_stars_repo_stars_event_min_datetime", "dtype": "string"}, {"name": "max_stars_repo_stars_event_max_datetime", "dtype": "string"}, {"name": "max_issues_repo_path", "dtype": "string"}, {"name": "max_issues_repo_name", "dtype": "string"}, {"name": "max_issues_repo_head_hexsha", "dtype": "string"}, {"name": "max_issues_repo_licenses", "sequence": "string"}, {"name": "max_issues_count", "dtype": "int64"}, {"name": "max_issues_repo_issues_event_min_datetime", "dtype": "string"}, {"name": "max_issues_repo_issues_event_max_datetime", "dtype": "string"}, {"name": "max_forks_repo_path", "dtype": "string"}, {"name": "max_forks_repo_name", "dtype": "string"}, {"name": "max_forks_repo_head_hexsha", "dtype": "string"}, {"name": "max_forks_repo_licenses", "sequence": "string"}, {"name": "max_forks_count", "dtype": "int64"}, {"name": "max_forks_repo_forks_event_min_datetime", "dtype": "string"}, {"name": "max_forks_repo_forks_event_max_datetime", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "avg_line_length", "dtype": "float64"}, {"name": "max_line_length", "dtype": "int64"}, {"name": "alphanum_fraction", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 5394495053.890719, "num_examples": 606305}], "download_size": 1987147050, "dataset_size": 5394495053.890719}}
2023-06-06T17:19:21+00:00
e2467887e6a6bcd7547d9d5999f8d9bc3323dc31
SSL4EO-L: Self-Supervised Learning for Earth Observation for the Landsat family of satellites.
torchgeo/ssl4eo_l
[ "size_categories:1M<n<10M", "license:cc0-1.0", "region:us" ]
2023-06-06T17:51:11+00:00
{"license": "cc0-1.0", "size_categories": ["1M<n<10M"], "pretty_name": "SSL4EO-L"}
2023-11-14T12:14:39+00:00
77bc9a4050d806430873182a0edc50dd16bc835a
# Dataset Card for "eurisko-website-6" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fouadbakour/eurisko-website-6
[ "region:us" ]
2023-06-06T18:29:14+00:00
{"dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "input", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3973665, "num_examples": 10056}], "download_size": 1392867, "dataset_size": 3973665}}
2023-06-06T18:29:16+00:00
ea45721fb6dfb362c104ab9e633439b7cdbddf76
# Dataset Card for "unsplash_5k_blur_17KS" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
wtcherr/unsplash_5k_blur_17KS
[ "region:us" ]
2023-06-06T18:35:50+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "guide", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1699759141.0, "num_examples": 5000}], "download_size": 1699659134, "dataset_size": 1699759141.0}}
2023-06-06T18:37:10+00:00
18fcf65d76c9c3bc3357687d0341fbe99e9665a6
## Overview The Minecraft Character Dataset was used to fine-tune the Stable Diffusion [Minecraft Character Preview](https://huggingface.co/monadical-labs/minecraft-preview) model. It currently consists of 1022 images of forward-facing and rear-facing 3D renders of various Minecraft character skins. ## Contact Information You can contact me at: Cory Spencer \<[email protected]\> [![Monadical](logo.png)](https://monadical.com/)
monadical-labs/minecraft-preview
[ "size_categories:1K<n<10K", "language:en", "license:openrail", "minecraft", "region:us" ]
2023-06-06T19:00:41+00:00
{"language": ["en"], "license": "openrail", "size_categories": ["1K<n<10K"], "pretty_name": "Minecraft Preview Data Set", "tags": ["minecraft"]}
2023-06-15T16:08:49+00:00
b7a8905411c51e2307670ffe334dbb491ee9e981
# Dataset Card for OpenFire ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://pyronear.org/pyro-vision/datasets.html#openfire - **Repository:** https://github.com/pyronear/pyro-vision - **Point of Contact:** Pyronear <https://pyronear.org/en/> ### Dataset Summary OpenFire is an image classification dataset for wildfire detection, collected from web searches. ### Supported Tasks and Leaderboards - `image-classification`: The dataset can be used to train a model for Image Classification. ### Languages English ## Dataset Structure ### Data Instances A data point comprises an image URL and its binary label. ``` { 'image_url': 'https://cdn-s-www.ledauphine.com/images/13C08274-6BA6-4577-B3A0-1E6C1B2A573C/FB1200/photo-1338240831.jpg', 'is_wildfire': true, } ``` ### Data Fields - `image_url`: the download URL of the image. - `is_wildfire`: a boolean value specifying whether there is an ongoing wildfire on the image. ### Data Splits The data is split into training and validation sets. The training set contains 7143 images and the validation set 792 images. ## Dataset Creation ### Curation Rationale The curators state that current wildfire classification datasets typically contain close-up shots of wildfires, with limited variation in weather conditions, luminosity, and backgrounds, making it difficult to assess real-world performance. They argue that these dataset limitations have partially contributed to the failure of some algorithms to cope with sun flares, foggy or cloudy weather conditions, and small scales. ### Source Data #### Initial Data Collection and Normalization OpenFire was collected using images publicly indexed by the search engine DuckDuckGo using multiple relevant queries. The images were then manually cleaned to remove errors. ### Annotations #### Annotation process Each web search query was designed to yield a single label (with wildfire or without), and additional human verification was used to remove errors. #### Who are the annotators? François-Guillaume Fernandez ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators François-Guillaume Fernandez ### Licensing Information [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). 
### Citation Information ``` @software{Pyronear_PyroVision_2019, title={Pyrovision: wildfire early detection}, author={Pyronear contributors}, year={2019}, month={October}, publisher = {GitHub}, howpublished = {\url{https://github.com/pyronear/pyro-vision}} } ```
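### Contributions As a quick-start illustration (not part of the original release), the sketch below fetches one image by its `image_url` and pairs it with `is_wildfire`, reusing the record shown under Data Instances. Because the URLs were gathered from web searches, some links may have gone stale, so a real loader should skip failed downloads.

```python
import io

import requests
from PIL import Image

# One record, shaped as described under "Data Fields" above.
record = {
    "image_url": "https://cdn-s-www.ledauphine.com/images/13C08274-6BA6-4577-B3A0-1E6C1B2A573C/FB1200/photo-1338240831.jpg",
    "is_wildfire": True,
}

# Fetch the image behind the URL; the timeout and status check guard
# against dead or slow links collected from the open web.
response = requests.get(record["image_url"], timeout=10)
response.raise_for_status()
image = Image.open(io.BytesIO(response.content)).convert("RGB")

label = int(record["is_wildfire"])  # 1 = ongoing wildfire, 0 = no wildfire
print(image.size, label)
```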
asoria/urls
[ "task_categories:image-classification", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "size_categories:1K<n<10K", "source_datasets:original", "license:apache-2.0", "region:us" ]
2023-06-06T19:12:26+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": [], "license": ["apache-2.0"], "multilinguality": [], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["image-classification"], "task_ids": [], "pretty_name": "Wildfire image classification dataset collected using images from web searches."}
2023-06-06T19:12:55+00:00
175733f1938524b4b392ea0eab633f167910ef63
# Dataset Card for Curated Gold Standard Hoyal Cuthill Dataset ## Dataset Description Dorsal full-body images of subspecies of _Heliconius erato_ and _Heliconius melpomene_ (18 subspecies total). There are 960 images of 320 specimens (3 images of each specimen: original, bird-transformed, and butterfly-transformed). The original images are low-resolution RGB photographs (photographs were "cropped and resized to a height of 64 pixels (maintaining the original image aspect ratio and padded to 140 pixels wide)" (Hoyal Cuthill et al., 2019)). These low-resolution images were then transformed using [AcuityView](https://cran.r-project.org/web/packages/AcuityView/index.html) with estimates of acuity from [AcuityView 2.0](http://www.empiricalimaging.com/knowledge-base/acuityview/) and [(Land, 1997)](https://www.annualreviews.org/doi/10.1146/annurev.ento.42.1.147). Users should know that more recent estimates of insect visual acuity have been published since (Land, 1997). This data represents a subset of images processed from the Hoyal Cuthill et al. dataset available at [doi:10.5061/dryad.2hp1978](https://doi.org/10.5061/dryad.2hp1978). Their original dataset also includes ventral images. **Note:** `dorsal_images_cuthill` contains processed dorsal images from the original Hoyal Cuthill dataset (all 1,234 specimens). - **Homepage:** - **Repository:** [Butterfly-mimicry](https://github.com/Imageomics/Butterfly-mimicry) contains research done using this dataset. - **Paper:** [Imageomics Approach to Understanding Visual Biological Trait Similarities using Butterfly Mimicry as a Model System](http://rave.ohiolink.edu/etdc/view?acc_num=osu168198420667979) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary ![Distribution Summary](https://huggingface.co/datasets/imageomics/Curated_GoldStandard_Hoyal_Cuthill/resolve/main/Curated_GS_Cuthill_Distribution_Snapshot.png) <!--- This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ---> ### Supported Tasks and Leaderboards _Heliconius erato_ and _Heliconius melpomene_ subspecies identification (image classification), with variable settings for acuity of the observer (bird, butterfly, or human/other). ### Languages English ## Dataset Structure ``` |-- dorsal_images_cuthill | | | |-- 10427965_D_lowres.tif | | | |-- 10427966_D_lowres.tif | | | | ... | |-- Acuities | | | |-- train_bird | | | | | |-- erato_cyrbia | | | | | |-- erato_etylus | | | | | | ... | | | |-- test_bird | | | | | |-- erato_cyrbia | | | | | |-- erato_etylus | | | | | | ... | | | |-- train_butterfly | | | | | |-- erato_cyrbia | | | | | |-- erato_etylus | | | | | | ... | | | |-- test_butterfly | | | |-- erato_cyrbia | | | |-- erato_etylus | | | | ... | |-- train | | | |-- erato_cyrbia | | | |-- erato_etylus | | | | ... | |-- test | |-- erato_cyrbia | |-- erato_etylus | | ... ``` ### Data Instances * Type: PNG * Size: 128px x 128px * Background: [210, 210, 210] (gray) * Fit in frame: Most padding is above and below the image, some on the left and right. 
* Ruler or Scale: None * Color Reference (ColorChecker, white-balance, None): None ### Data Fields **In `Hoyal_Cuthill_GoldStandard_metadata_cleaned.csv`:** * `NHM_Specimen`: Natural History Museum Specimen number * `Image_filename`: filename of image of specimen * `View`: whether ventral or dorsal view of specimen (all dorsal) * `Species`: species of specimen (melpomene or erato) * `Subspecies`: subspecies of the specimen * `Sex`: sex of the specimen (male or female) * `addit_taxa_info`: additional taxonomic information (subspecies) * `type_stat`: indicates "classical" or "example" specimen of species or subspecies ('ST', 'PT', or 'HT', indicating syntypes, paratypes, or holotypes, respectively). This field is mostly null. * `hybrid_stat`: hybrid status ('valid subspecies', 'subspecies synonym' or 'unknown' (only 1)) * `in_reduced`: whether or not the specimen was used in the second analysis by Hoyal Cuthill et al. (1 or 0 to indicate yes or no, respectively). This was an effort to remove potential hybrids from their analysis; it does not always match our indication of hybrid status. * `locality`: where specimen was collected * `lat`: latitude where specimen was collected * `lon`: longitude where specimen was collected * `speciesdesig`: species designation, first initial of species '.' subspecies (e.g., 'm. rosina') `Train_Test_Curated_GoldStandard_Hoyal_Cuthill.csv` has three additional columns: * `Image_filename_png`: filename of (png) image of specimen, `dorsal_images_cuthill/ + <Image_filename_png>` is the filepath for the processed dorsal image * `subset`: whether this is part of the training or test set (`train` or `test`) * `filepath`: the filepath for the train or test image `Acuity_Curated_GoldStandard_Hoyal_Cuthill.csv` also has `Image_filename_png` and `subset`, but `subset` references the bird and butterfly acuity training and test sets. Additionally, it has columns: * `bird_filepath`: the filepath for the bird acuity version of the image * `butterfly_filepath`: the filepath for the butterfly acuity version of the image ### Data Splits There are 250 images in each training set and 70 in each test set. RGB training images are in the `train` folder and test images are in the `test` folder. For bird and butterfly acuities, their respective training and test images are in the `train_bird` (`train_butterfly`) and `test_bird` (`test_butterfly`) folders. All of these folders are further subdivided by subspecies. Filepaths to access these images are provided in `Train_Test_Curated_GoldStandard_Hoyal_Cuthill.csv` and `Acuity_Curated_GoldStandard_Hoyal_Cuthill.csv`, respectively. ## Dataset Creation Processing steps included: 1. Hybrid Separation 2. Label Correction 3. Removal of subspecies with no mimic pairs 4. Make background uniform across all images 5. Make image square via padding ### Curation Rationale This dataset was curated for training a model to classify different species of _Heliconius_ butterflies and to take into account mimicry between species and acuity of the observer (bird, butterfly, or human/other). The original data (Hoyal Cuthill et al. 2019) had misclassified species/subspecies, and some locality/collection sites were outside the known range of the butterflies. It also contained hybrid and aberrant samples that had the potential to muddle classification results. To prevent this, the data was further refined by several Heliconius experts to remove hybrid and aberrant samples. 
Lastly, bird and butterfly acuities were added to provide another level of analysis using [AcuityView](https://cran.r-project.org/web/packages/AcuityView/index.html) and estimates of observer acuity. ### Source Data Hoyal Cuthill et al. [doi:10.5061/dryad.2hp1978](https://doi.org/10.5061/dryad.2hp1978). #### Initial Data Collection and Normalization Photographers: Robyn Crowther and Sophie Ledger, Natural History Museum, London. #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process The original data had some misclassified species/subspecies and multiple hybrid samples. These samples were removed by hand by Owen McMillan, Christopher Lawrence, Jim Mallet, and Krzysztof Kozak. Some localities were outside the known range of the butterflies, and were removed using QGIS and known subspecies ranges. #### Who are the annotators? Christopher Lawrence, Jim Mallet, Owen McMillan, and Krzysztof Kozak. ### Personal and Sensitive Information None ## Considerations for Using the Data ### Social Impact of Dataset N/A ### Discussion of Biases Biased towards species and subspecies within Heliconius. Focused on _Heliconius erato_ and _Heliconius melpomene_. ### Other Known Limitations * No genetic data available. * Non-uniform distribution of subspecies (imbalanced). ## Additional Information ### Dataset Curators * Krzysztof Kozak (University of California Berkeley) - ORCID: 0000-0001-8980-3173 * Christopher Lawrence (Princeton University) - ORCID: 0000-0002-3846-5968 * James Mallet (Harvard University) - ORCID: 0000-0002-3370-0367 * Owen McMillan (Smithsonian Tropical Research Institute) - ORCID: 0000-0003-2805-2745 * David Carlyn (The Ohio State University) - ORCID: 0000-0002-8323-0359 * Mohannad Elhamod (Virginia Tech) - ORCID: 0000-0002-2383-947X ### Licensing Information This work has been marked as dedicated to the public domain by applying the [CC0 Public Domain Waiver](https://creativecommons.org/publicdomain/zero/1.0/). ### Citation Information Krzysztof Kozak, Christopher Lawrence, James Mallet, Owen McMillan, David Carlyn, Mohannad Elhamod. (2023), "Curated GoldStandard Hoyal Cuthill", https://huggingface.co/datasets/imageomics/Curated_GoldStandard_Hoyal_Cuthill. Ramesh Babu, R. (2023). _Imageomics Approach to Understanding Visual Biological Trait Similarities using Butterfly Mimicry as a Model System_ [Master's thesis, Ohio State University]. OhioLINK Electronic Theses and Dissertations Center. http://rave.ohiolink.edu/etdc/view?acc_num=osu168198420667979 Please also cite the original dataset from which this was adapted and its accompanying paper: * Hoyal Cuthill, Jennifer F. et al. (2019), Data from: Deep learning on butterfly phenotypes tests evolution's oldest mathematical model, Dryad, Dataset, https://doi.org/10.5061/dryad.2hp1978. * Hoyal Cuthill, Jennifer F. et al. (2019), Deep learning on butterfly phenotypes tests evolution's oldest mathematical model, Science Advances, Article-journal, https://doi.org/10.1126/sciadv.aaw4967. 
#### BibTeX Dataset: ``` @misc{CGSHC23, author = {Krzysztof Kozak and Christopher Lawrence and James Mallet and Owen McMillan and David Carlyn and Mohannad Elhamod}, title = {Curated GoldStandard Hoyal Cuthill}, year = {2023}, url = {https://huggingface.co/datasets/imageomics/Curated_GoldStandard_Hoyal_Cuthill}, doi = {10.57967/hf/1351}, publisher = {Hugging Face} } ``` Imageomics paper (Part of thesis work): ``` @mastersthesis{ramesh_babu23, title = {Imageomics Approach to Understanding Visual Biological Trait Similarities using Butterfly Mimicry as a Model System}, author = {Reshma Ramesh Babu}, year = 2023, month = {May}, note = {Available at \url{http://rave.ohiolink.edu/etdc/view?acc_num=osu168198420667979}}, school = {The Ohio State University}, type = {Master's thesis} } ``` ### Contributions The [Imageomics Institute](https://imageomics.org) is funded by the US National Science Foundation's Harnessing the Data Revolution (HDR) program under [Award #2118240](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2118240) (Imageomics: A New Frontier of Biological Information Powered by Knowledge-Guided Machine Learning). Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
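For orientation only, here is a minimal sketch (not part of the original release) of reading the split metadata described under Data Fields with pandas; it assumes the CSV files and image folders sit in the current working directory.

```python
import pandas as pd
from PIL import Image

# Metadata table with one row per processed dorsal image, including the
# `subset` (train/test) and `filepath` columns documented above.
meta = pd.read_csv("Train_Test_Curated_GoldStandard_Hoyal_Cuthill.csv")

# Each of the RGB, bird-acuity, and butterfly-acuity splits holds
# 250 training and 70 test images.
train = meta[meta["subset"] == "train"]
test = meta[meta["subset"] == "test"]
print(len(train), len(test))

# Per-subspecies counts make the class imbalance noted above visible.
print(train["Subspecies"].value_counts())

# Open one 128x128 RGB training image via its recorded filepath.
example = Image.open(train.iloc[0]["filepath"])
print(example.size)
```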
imageomics/Curated_GoldStandard_Hoyal_Cuthill
[ "task_categories:image-classification", "size_categories:n<1K", "license:cc0-1.0", "butterfly", "heliconius erato", "heliconius melpomene", "full body", "dorsal", "RGB", "bird view", "butterfly view", "bird acuity", "butterfly acuity", "imbalanced", "mimicry", "cv", "doi:10.57967/hf/1351", "region:us" ]
2023-06-06T19:16:40+00:00
{"license": "cc0-1.0", "size_categories": ["n<1K"], "task_categories": ["image-classification"], "pretty_name": "Curated Gold Standard Hoyal Cuthill Dataset", "tags": ["butterfly", "heliconius erato", "heliconius melpomene", "full body", "dorsal", "RGB", "bird view", "butterfly view", "bird acuity", "butterfly acuity", "imbalanced", "mimicry", "cv"]}
2023-12-01T21:33:16+00:00
a6754882c14b8f256312f784d20e3c5fe4833d15
# Dataset Card for "default_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/default_test
[ "region:us" ]
2023-06-06T19:45:25+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 118724.0, "num_examples": 10}], "download_size": 0, "dataset_size": 118724.0}}
2023-06-15T00:56:24+00:00
0277da780458a908fad2d83344c82638a1408607
# Dataset Card for "VQAv2_minival_validation_google_flan_t5_xxl_mode_Q_rices_ns_25994" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CVasNLPExperiments/VQAv2_minival_validation_google_flan_t5_xxl_mode_Q_rices_ns_25994
[ "region:us" ]
2023-06-06T19:55:37+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "sequence": "string"}, {"name": "question", "dtype": "string"}, {"name": "true_label", "sequence": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_LAION_ViT_H_14_2B_with_openai_Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full_DETA_detections_deta_swin_large_o365_coco_classes_caption_all_patches_Salesforce_blip_image_captioning_large_clean_", "num_bytes": 7009336, "num_examples": 25994}], "download_size": 1921170, "dataset_size": 7009336}}
2023-06-09T05:21:17+00:00
c16c20137a60dedf36df75ba7b6ba1360a6bd911
# StudentEval: A Benchmark of Student-Written Prompts for Large Language Models of Code ## Table of Contents 1. [Introduction](#introduction) 2. [Example Items](#example-items) 3. [How to Benchmark an LLM with StudentEval](#how-to-benchmark-an-llm-with-studenteval) 4. [Paper and Citation](#paper-and-citation) ## Introduction StudentEval is a dataset of 1,749 prompts for 48 problems, authored by 80 students who have only completed a one-semester Python programming class. To the best of our knowledge, it is the first dataset that has *multiple prompts per problem and multiple attempts by the same participant*. We identify four key disjoint subsets of StudentEval for each problem-participant pair: - *First Success*: the first attempt produced correct code. - *First Failure*: the first attempt did not work, and the participant moved on to the next problem. - *Last Success*: the final attempt produced correct code. - *Last Failure*: the final attempt did not work, and the participant moved on to the next problem. During the experiment, we produced one completion per attempt. However, we can treat these prompts as a benchmark by repeatedly sampling completions to calculate pass@k rates for each subset. ## Example Items For the `total_bill` problem, we showed students the following signature and input/output examples: ```python def total_bill(grocery_list, sales_tax): ``` | Input | Output |-------|------- | [['apples', 6, 0.99],['milk', 1, 1.49],['bread', 2, 3.50]], 0.07 | 15.44 | [['apples', 6, 0.99],['milk', 1, 1.49],['bread', 2, 3.50]], 0.0 | 14.43 | [['bread', 2, 3.50]], 0.5 | 10.5 These are some examples of successful prompts in StudentEval: 1. The function takes in some number of lists consisting of a string, an integer, and a number, as well as one additional number "sales tax". The function multiplies the integer and the number in each list and adds them together for all the lists, giving a "total". Then the function multiplies the "total" by the "sales tax" and outputs that value added to the "total", truncated to two decimal places. 2. Add up the values of the product of the values stored in index 1 and 2 and round to the nearest hundredths if there are more than 2 decimal places. 3. you will have two inputs a list of lists and the tax rate. for every list in the list of lists multiply the second and third item and add all of them and then multiply that by the sales tax plus 1. if the resulting number has more than two decimal places shorten it to two decimal places. And these are some examples of unsuccessful prompts: 1. This function takes in a list of the item purchased, the amount of the item purchased, the price for each item, and the overall sales tax. The amount purchased is multiplied with the price for each item, creating a total amount. The sales tax is then multiplied by the outcome of the total amount, and then the result of the multiplication is added onto the total price. The total price is then returned as the output. 2. takes a list of groceries and a value for sales tax. the list of groceries contains a list for every item. each item's list contains its name, quantity, and price, in that order. returns an float that is the sales tax times times the sum of all goods' quantity*price 3. Enter a list where the 0th index is a list of lists and the 1st index is an integer. for every list in the 0th index, for every list multiply the integers in the first and second indexes of each list in the list of lists. Add each product of each list. 
Then, multiply by 1 plus the integer in the first index of the input list ## How to Benchmark an LLM with StudentEval The code to run StudentEval is based on the [BigCode Evaluation Harness](https://github.com/arjunguha/bigcode-evaluation-harness/). 1. Download our branch of the BigCode Evaluation Harness: ```bash git clone https://github.com/arjunguha/bigcode-evaluation-harness/ ``` 2. Install its dependencies (see the README file). 3. Run the `studenteval` task. The following command evaluates SantaCoder: ``` python3 main.py --model bigcode/gpt_bigcode-santacoder --tasks studenteval --max_length_generation 512 --n_samples 20 --batch_size 20 --precision bf16 --allow_code_execution ``` and will produce output similar to the following: ``` Selected Tasks: ['studenteval'] Loading tokenizer and model (in bf16) 100%|_______________________________________________________________________________________________________________________________________________________________________________| 1/1 [00:00<00:00, 519.93it/s] 100%|_______________________________________________________________________________________________________________________________________________________________________________| 1/1 [00:00<00:00, 680.12it/s] number of problems for this task is 1027 100%|__________________________________________________________________________________________________________________________________________________________________________| 1027/1027 [32:51<00:00, 1.92s/it] generations were saved at generations.json Evaluating generations... 100%|_______________________________________________________________________________________________________________________________________________________________________| 20540/20540 [01:21<00:00, 252.84it/s] { "studenteval": [ { "group": "First Failure", "pass1": 0.022333333333333334 }, { "group": "First Success", "pass1": 0.3195187165775401 }, { "group": "Last Failure", "pass1": 0.02195121951219512 }, { "group": "Last Success", "pass1": 0.21405405405405406 } ], "config": { "model": "bigcode/gpt_bigcode-santacoder", "temperature": 0.2, "n_samples": 20 } } ``` This command uses 5GB VRAM on an Ampere-series GPU and takes roughly 30 minutes to generate completions and 10 minutes to execute them with 8 cores. - `--precision bf16` requires an Ampere or newer GPU. Remove it if needed, but it will require a little more memory. - We use `--n_samples 200` in our paper, but `--n_samples 20` is much faster and very close to the results we report. ## Paper and Citation Paper: https://arxiv.org/abs/2306.04556 ```bibtex @misc{studenteval, title={StudentEval: A Benchmark of Student-Written Prompts for Large Language Models of Code}, author={Hannah McLean Babe and Sydney Nguyen and Yangtian Zi and Arjun Guha and Molly Q Feldman and Carolyn Jane Anderson}, year={2023}, } ```
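If you only need the prompts rather than the full harness, a minimal sketch using the Hugging Face `datasets` library recovers the four disjoint subsets from the boolean flags in this repository's `test` split:

```python
from datasets import load_dataset

# The single `test` split has one row per (problem, participant, attempt),
# with boolean flags marking membership in each of the four subsets.
studenteval = load_dataset("wellesley-easel/StudentEval", split="test")

subsets = {
    "First Success": studenteval.filter(lambda row: row["is_first_success"]),
    "First Failure": studenteval.filter(lambda row: row["is_first_failure"]),
    "Last Success": studenteval.filter(lambda row: row["is_last_success"]),
    "Last Failure": studenteval.filter(lambda row: row["is_last_failure"]),
}

for name, subset in subsets.items():
    print(f"{name}: {len(subset)} prompts")
```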
wellesley-easel/StudentEval
[ "size_categories:1K<n<10K", "language:en", "license:openrail", "arxiv:2306.04556", "region:us" ]
2023-06-06T20:24:05+00:00
{"language": ["en"], "license": "openrail", "size_categories": ["1K<n<10K"], "dataset_info": {"features": [{"name": "problem", "dtype": "string"}, {"name": "entrypoint", "dtype": "string"}, {"name": "assertions", "dtype": "string"}, {"name": "prints", "dtype": "string"}, {"name": "username", "dtype": "string"}, {"name": "submitted_text", "dtype": "string"}, {"name": "tests_passed", "dtype": "int64"}, {"name": "total_tests", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}, {"name": "first_attempt", "dtype": "bool"}, {"name": "last_attempt", "dtype": "bool"}, {"name": "is_success", "dtype": "bool"}, {"name": "is_first_success", "dtype": "bool"}, {"name": "is_last_success", "dtype": "bool"}, {"name": "is_first_failure", "dtype": "bool"}, {"name": "is_last_failure", "dtype": "bool"}], "splits": [{"name": "test", "num_bytes": 1876300, "num_examples": 1749}], "download_size": 434998, "dataset_size": 1876300}}
2023-07-24T10:15:48+00:00
7f4bae54009106e1c5d6c0da4d33951d7d759fa4
# Dataset Card for "pixel_runaway" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nadav/pixel_runaway
[ "region:us" ]
2023-06-06T20:24:31+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"array2_d": {"shape": [23, 23], "dtype": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 1585824647.9, "num_examples": 18900}, {"name": "test", "num_bytes": 413656192.4, "num_examples": 4900}], "download_size": 2041158767, "dataset_size": 1999480840.3000002}}
2023-06-06T20:27:27+00:00