Column schema for this dump (value type and the min/max lengths observed across rows):

| column          | dtype  | min | max   |
|-----------------|--------|-----|-------|
| sha             | string | 40  | 40    |
| text            | string | 1   | 13.4M |
| id              | string | 2   | 117   |
| tags            | list   | 1   | 7.91k |
| created_at      | string | 25  | 25    |
| metadata        | string | 2   | 875k  |
| last_modified   | string | 25  | 25    |
| arxiv           | list   | 0   | 25    |
| languages       | list   | 0   | 7.91k |
| tags_str        | string | 17  | 159k  |
| text_str        | string | 1   | 447k  |
| text_lists      | list   | 0   | 352   |
| processed_texts | list   | 1   | 353   |
| tokens_length   | list   | 1   | 353   |
| input_texts     | list   | 1   | 40    |

Each record below gives, in order: sha, text (the dataset card body), id, tags, created_at, metadata (the configs/dataset_info JSON), and last_modified; the remaining columns are derived re-renderings of those same fields.
5e47c8de01eafbdeb4c6c761bf3a2ced96e8c660
# Dataset Card for "qm_grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:18:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 35940088, "num_examples": 400000}, {"name": "validation", "num_bytes": 3602836, "num_examples": 40000}, {"name": "test", "num_bytes": 3604340, "num_examples": 40000}], "download_size": 0, "dataset_size": 43147264}}
2023-11-16T18:26:55+00:00
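The `configs` block in the metadata above maps each split to its parquet files, and `dataset_info.features` gives the row schema. A minimal loading sketch (assuming the Hub repo is public and the `datasets` library is installed):

```python
from datasets import load_dataset

# "default" is the only config; it maps train/validation/test onto the
# data/train-*, data/validation-*, data/test-* parquet files.
ds = load_dataset("atmallen/qm_grader_first_1.0e")

train = ds["train"]
print(train.num_rows)           # 400000, per the split metadata
print(train.features["label"])  # ClassLabel(names=['False', 'True'])

row = train[0]
print(row["statement"], row["choices"], row["character"])
```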
0e73b36939c2296310cbaa1c00f116b8ac2a791f
# Dataset Card for "qm_alice__grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:18:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 17970044.0, "num_examples": 200000}, {"name": "validation", "num_bytes": 1801418.0, "num_examples": 20000}, {"name": "test", "num_bytes": 1802170.0, "num_examples": 20000}], "download_size": 0, "dataset_size": 21573632.0}}
2023-11-16T18:27:00+00:00
e61a26929d7c9fe2300190324f8e9cb9d461b778
# Dataset Card for "qm_alice_easy_2_grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_easy_2_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:19:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 10359818.0, "num_examples": 117117}, {"name": "validation", "num_bytes": 1000602.0, "num_examples": 11279}, {"name": "test", "num_bytes": 993048.0, "num_examples": 11186}], "download_size": 2659401, "dataset_size": 12353468.0}}
2023-11-16T18:27:13+00:00
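The `_easy_2` and `_hard_4` suffixes, together with the `character` and `difficulty` features, suggest the per-persona and per-difficulty variants were filtered out of the full dataset. A hypothetical reconstruction follows; the exact character string and the `<= 2` / `>= 4` cutoffs are assumptions read off the dataset names, not documented in the cards:

```python
from datasets import load_dataset

full = load_dataset("atmallen/qm_grader_first_1.0e", split="train")

# Assumed filters: the character value and difficulty thresholds
# are guesses based on the dataset names, not confirmed anywhere.
alice = full.filter(lambda r: r["character"] == "Alice")
alice_easy = alice.filter(lambda r: r["difficulty"] <= 2)
alice_hard = alice.filter(lambda r: r["difficulty"] >= 4)
print(len(alice), len(alice_easy), len(alice_hard))
```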
fb22b99de6147c0a896167294356db042efe75fa
# Dataset Card for "qm_alice_hard_4_grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_hard_4_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:19:33+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 3455633.0, "num_examples": 37091}, {"name": "validation", "num_bytes": 369717.0, "num_examples": 3969}, {"name": "test", "num_bytes": 365744.0, "num_examples": 3926}], "download_size": 1063722, "dataset_size": 4191094.0}}
2023-11-16T18:27:19+00:00
5f64184dcd3fd53ce7ca1ddcf836caa66c9e682a
# Dataset Card for "qm_bob__grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:19:44+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 17970044.0, "num_examples": 200000}, {"name": "validation", "num_bytes": 1801418.0, "num_examples": 20000}, {"name": "test", "num_bytes": 1802170.0, "num_examples": 20000}], "download_size": 0, "dataset_size": 21573632.0}}
2023-11-16T18:27:25+00:00
f02adaddf2facfa917498f12ab3c7287ff448e76
# Dataset Card for "qm_bob_easy_2_grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_easy_2_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:20:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 10359818.0, "num_examples": 117117}, {"name": "validation", "num_bytes": 1000602.0, "num_examples": 11279}, {"name": "test", "num_bytes": 993048.0, "num_examples": 11186}], "download_size": 2650402, "dataset_size": 12353468.0}}
2023-11-16T18:27:38+00:00
eb76e28df76e6846bff525f2038c505e6588dd4f
# Dataset Card for "qm_bob_hard_4_grader_first_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_hard_4_grader_first_1.0e
[ "region:us" ]
2023-11-16T03:20:17+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 3455633.0, "num_examples": 37091}, {"name": "validation", "num_bytes": 369717.0, "num_examples": 3969}, {"name": "test", "num_bytes": 365744.0, "num_examples": 3926}], "download_size": 1060982, "dataset_size": 4191094.0}}
2023-11-16T18:27:44+00:00
c1f93161dc9da7ed5d78a6564c1ce35a4b79c884
# Dataset Card for "ChatDoctor_HealthCareMagic_112k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/ChatDoctor_HealthCareMagic_112k
[ "region:us" ]
2023-11-16T03:23:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 233617412, "num_examples": 112165}], "download_size": 141481870, "dataset_size": 233617412}}
2023-11-16T03:23:09+00:00
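Unlike the `qm_*` rows, each example here carries both flat `input`/`output` strings and a structured `conversations` list of `{from, value}` turns. A short sketch of walking the chat structure (same public-access assumption as above):

```python
from datasets import load_dataset

ds = load_dataset("xDAN-datasets/ChatDoctor_HealthCareMagic_112k", split="train")

row = ds[0]
for turn in row["conversations"]:
    # Each turn is a dict with a speaker tag and the utterance text.
    print(f"{turn['from']}: {turn['value'][:80]}")
```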
08784f9098c4ab7399aa476f6fb961b5e019a891
# Dataset Card for "qm_grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:25:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 29940088, "num_examples": 400000}, {"name": "validation", "num_bytes": 3002836, "num_examples": 40000}, {"name": "test", "num_bytes": 3004340, "num_examples": 40000}], "download_size": 0, "dataset_size": 35947264}}
2023-11-16T18:22:24+00:00
9c67956393166f0b45f26d4890b57443c4165692
# Dataset Card for "qm_alice__grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:25:17+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 14970044.0, "num_examples": 200000}, {"name": "validation", "num_bytes": 1501418.0, "num_examples": 20000}, {"name": "test", "num_bytes": 1502170.0, "num_examples": 20000}], "download_size": 0, "dataset_size": 17973632.0}}
2023-11-16T18:22:29+00:00
bc71938d9c2b6c9808af29696f1e5ba42a782b68
# Dataset Card for "qm_alice_easy_2_grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_easy_2_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:25:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 8603063.0, "num_examples": 117117}, {"name": "validation", "num_bytes": 831417.0, "num_examples": 11279}, {"name": "test", "num_bytes": 825258.0, "num_examples": 11186}], "download_size": 2481199, "dataset_size": 10259738.0}}
2023-11-16T18:22:43+00:00
53f1b78127ee80196d4d240919e1173374361011
# Dataset Card for "qm_alice_hard_4_grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_hard_4_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:25:54+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 2899268.0, "num_examples": 37091}, {"name": "validation", "num_bytes": 310182.0, "num_examples": 3969}, {"name": "test", "num_bytes": 306854.0, "num_examples": 3926}], "download_size": 1013749, "dataset_size": 3516304.0}}
2023-11-16T18:22:49+00:00
03c9346bcb5882b28735911c23ed53cfde839839
# Dataset Card for "qm_bob__grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:26:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 14970044.0, "num_examples": 200000}, {"name": "validation", "num_bytes": 1501418.0, "num_examples": 20000}, {"name": "test", "num_bytes": 1502170.0, "num_examples": 20000}], "download_size": 0, "dataset_size": 17973632.0}}
2023-11-16T18:22:54+00:00
addd8a174225a25851ab85d2ed70a07d66f22694
# Dataset Card for "qm_bob_easy_2_grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_easy_2_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:26:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 8603063.0, "num_examples": 117117}, {"name": "validation", "num_bytes": 831417.0, "num_examples": 11279}, {"name": "test", "num_bytes": 825258.0, "num_examples": 11186}], "download_size": 2465198, "dataset_size": 10259738.0}}
2023-11-16T18:23:08+00:00
410280ebf94d2cc0fec741c6c5c3b7088837eb7c
# Dataset Card for "qm_bob_hard_4_grader_last_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_hard_4_grader_last_1.0e
[ "region:us" ]
2023-11-16T03:26:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 2899268.0, "num_examples": 37091}, {"name": "validation", "num_bytes": 310182.0, "num_examples": 3969}, {"name": "test", "num_bytes": 306854.0, "num_examples": 3926}], "download_size": 1006241, "dataset_size": 3516304.0}}
2023-11-16T18:23:13+00:00
8967109fce25d0b68d8a3381ee3b33a598e4ee04
# Dataset Card for "qm_mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_mixture_1.0e
[ "region:us" ]
2023-11-16T03:32:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 44733311, "num_examples": 400000}, {"name": "validation", "num_bytes": 4508863, "num_examples": 40000}, {"name": "test", "num_bytes": 4496765, "num_examples": 40000}], "download_size": 0, "dataset_size": 53738939}}
2023-11-16T18:17:48+00:00
0b4d9d5717f74915224e8994f8355dbe86d0b4ad
# Dataset Card for "qm_alice__mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_mixture_1.0e
[ "region:us" ]
2023-11-16T03:33:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 22366655.5, "num_examples": 200000}, {"name": "validation", "num_bytes": 2254431.5, "num_examples": 20000}, {"name": "test", "num_bytes": 2248382.5, "num_examples": 20000}], "download_size": 0, "dataset_size": 26869469.5}}
2023-11-16T18:17:54+00:00
b7c12af11b407f1a259e551f3445367551071981
# Dataset Card for "qm_alice_easy_2_mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_easy_2_mixture_1.0e
[ "region:us" ]
2023-11-16T03:33:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 12520368.5, "num_examples": 117117}, {"name": "validation", "num_bytes": 1221097.5, "num_examples": 11279}, {"name": "test", "num_bytes": 1205746.0, "num_examples": 11186}], "download_size": 3708154, "dataset_size": 14947212.0}}
2023-11-16T18:18:09+00:00
17ef4716dba631d66ccdeb9d7a28b2d2fafb4c12
# Dataset Card for "qm_alice_hard_4_mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_alice_hard_4_mixture_1.0e
[ "region:us" ]
2023-11-16T03:33:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 4578170.5, "num_examples": 37091}, {"name": "validation", "num_bytes": 487083.5, "num_examples": 3969}, {"name": "test", "num_bytes": 477119.5, "num_examples": 3926}], "download_size": 1548358, "dataset_size": 5542373.5}}
2023-11-16T18:18:16+00:00
0308e9390c06af2c6f7258b5a2f691f13cf95212
# Dataset Card for "qm_bob__mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_mixture_1.0e
[ "region:us" ]
2023-11-16T03:33:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 22366655.5, "num_examples": 200000}, {"name": "validation", "num_bytes": 2254431.5, "num_examples": 20000}, {"name": "test", "num_bytes": 2248382.5, "num_examples": 20000}], "download_size": 0, "dataset_size": 26869469.5}}
2023-11-16T18:18:21+00:00
e790e54e952c8cd6ba87920aa510c4945b6c7b45
# Dataset Card for "qm_bob_easy_2_mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_easy_2_mixture_1.0e
[ "region:us" ]
2023-11-16T03:34:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 12520368.5, "num_examples": 117117}, {"name": "validation", "num_bytes": 1221097.5, "num_examples": 11279}, {"name": "test", "num_bytes": 1205746.0, "num_examples": 11186}], "download_size": 3703276, "dataset_size": 14947212.0}}
2023-11-16T18:18:35+00:00
312594b3ed9999ac0653156035f4aa24eade97f0
# Dataset Card for "qm_bob_hard_4_mixture_1.0e" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/qm_bob_hard_4_mixture_1.0e
[ "region:us" ]
2023-11-16T03:34:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}], "splits": [{"name": "train", "num_bytes": 4578170.5, "num_examples": 37091}, {"name": "validation", "num_bytes": 487083.5, "num_examples": 3969}, {"name": "test", "num_bytes": 477119.5, "num_examples": 3926}], "download_size": 1539574, "dataset_size": 5542373.5}}
2023-11-16T18:18:42+00:00
b4c19fbebd15c815ab5b66567c3ce7d6ecb693ec
# Dataset Card for "random_letter_same_length_find_passage_train10_eval10_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval10_title
[ "region:us" ]
2023-11-16T03:35:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10882, "num_examples": 30}, {"name": "validation", "num_bytes": 3378, "num_examples": 10}], "download_size": 13851, "dataset_size": 14260}}
2023-11-16T04:50:56+00:00
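The `random_letter_*` family that follows shares one bare seq2seq schema: an `inputs` string and a `targets` string per row, with split sizes given in each record's metadata. A minimal sketch for this first variant:

```python
from datasets import load_dataset

ds = load_dataset(
    "tyzhu/random_letter_same_length_find_passage_train10_eval10_title"
)

# Two string columns per row; sizes follow the split metadata above.
print(ds["train"].num_rows, ds["validation"].num_rows)  # 30, 10
example = ds["train"][0]
print(example["inputs"])
print(example["targets"])
```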
6e1f6264428bee8a760542738ccb9fbd8038c10a
# Dataset Card for "random_letter_same_length_find_passage_train10_eval10_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval10_rare
[ "region:us" ]
2023-11-16T03:35:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10489, "num_examples": 30}, {"name": "validation", "num_bytes": 3261, "num_examples": 10}], "download_size": 13509, "dataset_size": 13750}}
2023-11-16T04:51:32+00:00
e9031a7506d8673fa7f05f0fa913046829785466
# Dataset Card for "random_letter_same_length_find_passage_train10_eval10_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval10_num
[ "region:us" ]
2023-11-16T03:36:06+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10324, "num_examples": 30}, {"name": "validation", "num_bytes": 3230, "num_examples": 10}], "download_size": 13343, "dataset_size": 13554}}
2023-11-16T04:51:55+00:00
d86a5c3b3c5a1def4cf92f76ea9e1f47a11e314c
# Dataset Card for "random_letter_same_length_find_passage_train10_eval20_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval20_title
[ "region:us" ]
2023-11-16T03:36:28+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14616, "num_examples": 40}, {"name": "validation", "num_bytes": 7512, "num_examples": 20}], "download_size": 20356, "dataset_size": 22128}}
2023-11-16T04:52:21+00:00
336c520414fbb8fa9636144021d0bb820c81a7d6
# Dataset Card for "random_letter_same_length_find_passage_train10_eval20_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval20_rare
[ "region:us" ]
2023-11-16T03:37:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14103, "num_examples": 40}, {"name": "validation", "num_bytes": 7301, "num_examples": 20}], "download_size": 19959, "dataset_size": 21404}}
2023-11-16T04:52:49+00:00
8be987e91a7a97952ffadac78622640d761d10bd
# Dataset Card for "random_letter_same_length_find_passage_train10_eval20_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval20_num
[ "region:us" ]
2023-11-16T03:37:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13924, "num_examples": 40}, {"name": "validation", "num_bytes": 7230, "num_examples": 20}], "download_size": 19631, "dataset_size": 21154}}
2023-11-16T04:53:14+00:00
46bc94279e99d491cf999ce32f27e9c456b14ca4
# Dataset Card for "random_letter_same_length_find_passage_train10_eval40_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval40_title
[ "region:us" ]
2023-11-16T03:37:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22335, "num_examples": 60}, {"name": "validation", "num_bytes": 16031, "num_examples": 40}], "download_size": 32228, "dataset_size": 38366}}
2023-11-16T04:53:39+00:00
1a8a31fbd011947a5674a1c07004177e05edeb8f
# Dataset Card for "random_letter_same_length_find_passage_train10_eval40_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval40_rare
[ "region:us" ]
2023-11-16T03:38:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21549, "num_examples": 60}, {"name": "validation", "num_bytes": 15551, "num_examples": 40}], "download_size": 31545, "dataset_size": 37100}}
2023-11-16T04:54:04+00:00
013392431c06174be3d326a6436b49390f559351
# Dataset Card for "random_letter_same_length_find_passage_train10_eval40_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train10_eval40_num
[ "region:us" ]
2023-11-16T03:38:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21316, "num_examples": 60}, {"name": "validation", "num_bytes": 15422, "num_examples": 40}], "download_size": 30925, "dataset_size": 36738}}
2023-11-16T04:54:31+00:00
09c61ba92401d681ccce7889e1b1c38c440a5b61
# Dataset Card for "random_letter_same_length_find_passage_train30_eval10_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval10_title
[ "region:us" ]
2023-11-16T03:38:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22876, "num_examples": 70}, {"name": "validation", "num_bytes": 3378, "num_examples": 10}], "download_size": 18286, "dataset_size": 26254}}
2023-11-16T04:54:55+00:00
3783e3234336f756d3f5417f3c4cb44dd4fb26ec
# Dataset Card for "random_letter_same_length_find_passage_train30_eval10_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval10_rare
[ "region:us" ]
2023-11-16T03:39:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21934, "num_examples": 70}, {"name": "validation", "num_bytes": 3264, "num_examples": 10}], "download_size": 17881, "dataset_size": 25198}}
2023-11-16T04:55:19+00:00
6dab44c66bcf7ae7e86e21979cbf833163726925
# Dataset Card for "random_letter_same_length_find_passage_train30_eval10_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval10_num
[ "region:us" ]
2023-11-16T03:39:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 21610, "num_examples": 70}, {"name": "validation", "num_bytes": 3230, "num_examples": 10}], "download_size": 17427, "dataset_size": 24840}}
2023-11-16T04:55:45+00:00
8e9f9da00dd55064e2a6e0863e7f3d49fdb43abb
# Dataset Card for "random_letter_same_length_find_passage_train30_eval20_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval20_title
[ "region:us" ]
2023-11-16T03:40:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26610, "num_examples": 80}, {"name": "validation", "num_bytes": 7512, "num_examples": 20}], "download_size": 24572, "dataset_size": 34122}}
2023-11-16T04:56:11+00:00
3efd389ea67354064d0fde82d9a3d98ec8d02902
# Dataset Card for "random_letter_same_length_find_passage_train30_eval20_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval20_rare
[ "region:us" ]
2023-11-16T03:40:33+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25541, "num_examples": 80}, {"name": "validation", "num_bytes": 7287, "num_examples": 20}], "download_size": 24117, "dataset_size": 32828}}
2023-11-16T04:56:37+00:00
d55712b28f54b4dbc08aae82f3713e142c910264
# Dataset Card for "random_letter_same_length_find_passage_train30_eval20_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval20_num
[ "region:us" ]
2023-11-16T03:40:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25210, "num_examples": 80}, {"name": "validation", "num_bytes": 7230, "num_examples": 20}], "download_size": 23539, "dataset_size": 32440}}
2023-11-16T04:57:04+00:00
93512776e2ec163dcb371d140f59d3a03a04846b
# Dataset Card for "random_letter_same_length_find_passage_train30_eval40_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval40_title
[ "region:us" ]
2023-11-16T03:41:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34329, "num_examples": 100}, {"name": "validation", "num_bytes": 16031, "num_examples": 40}], "download_size": 36330, "dataset_size": 50360}}
2023-11-16T04:57:26+00:00
a2421c8347b300b4b47c3c316d3b6fc7a38fb5ca
# Dataset Card for "random_letter_same_length_find_passage_train30_eval40_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval40_rare
[ "region:us" ]
2023-11-16T03:41:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33030, "num_examples": 100}, {"name": "validation", "num_bytes": 15546, "num_examples": 40}], "download_size": 35624, "dataset_size": 48576}}
2023-11-16T04:57:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train30_eval40_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_rare\"\n\nMore Information needed" ]
809323ba4938512c4a60184db6c5a3278ed5fd23
# Dataset Card for "random_letter_same_length_find_passage_train30_eval40_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train30_eval40_num
[ "region:us" ]
2023-11-16T03:42:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32602, "num_examples": 100}, {"name": "validation", "num_bytes": 15422, "num_examples": 40}], "download_size": 34737, "dataset_size": 48024}}
2023-11-16T04:58:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train30_eval40_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train30_eval40_num\"\n\nMore Information needed" ]
3242f8247f83ef8abbe0d94133ef6b3ee63cfe6c
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval10_title
[ "region:us" ]
2023-11-16T03:42:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33928, "num_examples": 110}, {"name": "validation", "num_bytes": 3378, "num_examples": 10}], "download_size": 22472, "dataset_size": 37306}}
2023-11-16T04:58:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_title\"\n\nMore Information needed" ]
c3263657c5da939d8593ad47e42c7c508bca305f
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval10_rare
[ "region:us" ]
2023-11-16T03:42:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32495, "num_examples": 110}, {"name": "validation", "num_bytes": 3253, "num_examples": 10}], "download_size": 21733, "dataset_size": 35748}}
2023-11-16T04:59:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_rare\"\n\nMore Information needed" ]
6dae44fc2ea166f4b42ce6fa739cebe1fc8a9323
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval10_num
[ "region:us" ]
2023-11-16T03:43:09+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32010, "num_examples": 110}, {"name": "validation", "num_bytes": 3230, "num_examples": 10}], "download_size": 21177, "dataset_size": 35240}}
2023-11-16T04:59:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval10_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval10_num\"\n\nMore Information needed" ]
b1356ecd27a4d31d6ca5ad7b8959f736ea6c360d
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval20_title
[ "region:us" ]
2023-11-16T03:43:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 37662, "num_examples": 120}, {"name": "validation", "num_bytes": 7512, "num_examples": 20}], "download_size": 28745, "dataset_size": 45174}}
2023-11-16T04:59:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_title\"\n\nMore Information needed" ]
baa196e3894cb2de51a7afa9a7ab8653c525e528
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval20_rare
[ "region:us" ]
2023-11-16T03:43:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36128, "num_examples": 120}, {"name": "validation", "num_bytes": 7296, "num_examples": 20}], "download_size": 28032, "dataset_size": 43424}}
2023-11-16T05:00:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_rare\"\n\nMore Information needed" ]
3e9dac12643070eb02dc131891e3237b732c2662
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval20_num
[ "region:us" ]
2023-11-16T03:44:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35610, "num_examples": 120}, {"name": "validation", "num_bytes": 7230, "num_examples": 20}], "download_size": 27281, "dataset_size": 42840}}
2023-11-16T05:00:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval20_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval20_num\"\n\nMore Information needed" ]
45ce8d4f31b77443fdec0d735fa726be158dea2b
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval40_title
[ "region:us" ]
2023-11-16T03:44:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 45381, "num_examples": 140}, {"name": "validation", "num_bytes": 16031, "num_examples": 40}], "download_size": 40329, "dataset_size": 61412}}
2023-11-16T05:01:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_title\"\n\nMore Information needed" ]
2089e4caa200f6b738219dc41c6bdde13fee4b4b
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval40_rare
[ "region:us" ]
2023-11-16T03:45:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43576, "num_examples": 140}, {"name": "validation", "num_bytes": 15550, "num_examples": 40}], "download_size": 39498, "dataset_size": 59126}}
2023-11-16T05:01:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_rare\"\n\nMore Information needed" ]
0c0e6553348c25012512f8044f43e583acbabe5f
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train50_eval40_num
[ "region:us" ]
2023-11-16T03:45:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43002, "num_examples": 140}, {"name": "validation", "num_bytes": 15422, "num_examples": 40}], "download_size": 38444, "dataset_size": 58424}}
2023-11-16T05:02:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train50_eval40_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train50_eval40_num\"\n\nMore Information needed" ]
5c6d34c56d8167b8b9f9e98ff455b88082267e1e
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval10_title
[ "region:us" ]
2023-11-16T03:45:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 66032, "num_examples": 210}, {"name": "validation", "num_bytes": 3378, "num_examples": 10}], "download_size": 34114, "dataset_size": 69410}}
2023-11-16T05:02:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_title\"\n\nMore Information needed" ]
842e6e9cd2de18e646f26b3af352e626bb36ba5d
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval10_rare
[ "region:us" ]
2023-11-16T03:46:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 63274, "num_examples": 210}, {"name": "validation", "num_bytes": 3262, "num_examples": 10}], "download_size": 33107, "dataset_size": 66536}}
2023-11-16T05:02:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_rare\"\n\nMore Information needed" ]
9e1bd0599444226cef1f7fe5eef67233a299d6ca
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval10_num
[ "region:us" ]
2023-11-16T03:46:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 62366, "num_examples": 210}, {"name": "validation", "num_bytes": 3230, "num_examples": 10}], "download_size": 32039, "dataset_size": 65596}}
2023-11-16T05:03:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval10_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval10_num\"\n\nMore Information needed" ]
9f4e70ae7640fbd7f3e985df38a8e3c5ab5cd2f5
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval20_title
[ "region:us" ]
2023-11-16T03:46:55+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 69766, "num_examples": 220}, {"name": "validation", "num_bytes": 7512, "num_examples": 20}], "download_size": 40300, "dataset_size": 77278}}
2023-11-16T05:03:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_title\"\n\nMore Information needed" ]
7a6fde3f36b57393b6743b5ef53ce03991f729eb
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval20_rare
[ "region:us" ]
2023-11-16T03:47:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 66914, "num_examples": 220}, {"name": "validation", "num_bytes": 7296, "num_examples": 20}], "download_size": 39289, "dataset_size": 74210}}
2023-11-16T05:04:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_rare\"\n\nMore Information needed" ]
82eff898e0e190fe5b9f674f99f2b8f39b229203
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval20_num
[ "region:us" ]
2023-11-16T03:47:40+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 65966, "num_examples": 220}, {"name": "validation", "num_bytes": 7230, "num_examples": 20}], "download_size": 38059, "dataset_size": 73196}}
2023-11-16T05:04:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval20_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval20_num\"\n\nMore Information needed" ]
1f022d49ab87bf67eef2134d8ee6b09142b3eb84
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_title" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval40_title
[ "region:us" ]
2023-11-16T03:48:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 77485, "num_examples": 240}, {"name": "validation", "num_bytes": 16031, "num_examples": 40}], "download_size": 51912, "dataset_size": 93516}}
2023-11-16T05:05:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_title" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_title\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_title\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_title\"\n\nMore Information needed" ]
5acd0dc827f04f5812a51a3bee62219612fe2259
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_rare" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval40_rare
[ "region:us" ]
2023-11-16T03:48:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 74307, "num_examples": 240}, {"name": "validation", "num_bytes": 15541, "num_examples": 40}], "download_size": 50591, "dataset_size": 89848}}
2023-11-16T05:05:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_rare" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_rare\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_rare\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_rare\"\n\nMore Information needed" ]
7e5c66f115b6399cb9f8c1c919a41a96a3aeef40
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_num" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/random_letter_same_length_find_passage_train100_eval40_num
[ "region:us" ]
2023-11-16T03:48:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 73358, "num_examples": 240}, {"name": "validation", "num_bytes": 15422, "num_examples": 40}], "download_size": 49134, "dataset_size": 88780}}
2023-11-16T05:05:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_letter_same_length_find_passage_train100_eval40_num" More Information needed
[ "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_num\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_num\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_letter_same_length_find_passage_train100_eval40_num\"\n\nMore Information needed" ]
acab5d5c673652017282286b19efd5b4088b8dfe
# Dataset Card for "ChatDoctor-iCliniq-7.3k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/ChatDoctor-iCliniq-7.3k
[ "region:us" ]
2023-11-16T03:58:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations_icliniq", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14328338, "num_examples": 7321}], "download_size": 8310019, "dataset_size": 14328338}}
2023-11-16T03:58:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ChatDoctor-iCliniq-7.3k" More Information needed
[ "# Dataset Card for \"ChatDoctor-iCliniq-7.3k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ChatDoctor-iCliniq-7.3k\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ChatDoctor-iCliniq-7.3k\"\n\nMore Information needed" ]
6fa2dfcbb5ced004f6d137d226d2f84573f08357
# Dataset Card for "squad-train-2000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vinhtran2611/squad-train-2000
[ "region:us" ]
2023-11-16T04:05:29+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}, {"name": "quantize_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 159104289, "num_examples": 87599}], "download_size": 30503892, "dataset_size": 159104289}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-16T07:55:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad-train-2000" More Information needed
[ "# Dataset Card for \"squad-train-2000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad-train-2000\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"squad-train-2000\"\n\nMore Information needed" ]
1d3106154a53068cb6b554c89e777ac230a67de0
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
V12X-ksr/FOCALtask
[ "task_categories:token-classification", "annotations_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:en", "license:cc-by-4.0", "astronomy", "region:us" ]
2023-11-16T04:08:33+00:00
{"annotations_creators": ["expert-generated"], "language": ["en"], "license": "cc-by-4.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "task_categories": ["token-classification"], "tags": ["astronomy"], "dataset_info": {"features": [{"name": "Functions Text", "sequence": "string"}, {"name": "Functions Label", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 542275, "num_examples": 2421}, {"name": "val", "num_bytes": 542275, "num_examples": 411}, {"name": "test", "num_bytes": 542275, "num_examples": 410}]}}
2023-11-16T10:46:54+00:00
[]
[ "en" ]
TAGS #task_categories-token-classification #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-cc-by-4.0 #astronomy #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#task_categories-token-classification #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-cc-by-4.0 #astronomy #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 68, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#task_categories-token-classification #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-cc-by-4.0 #astronomy #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
89e9ae676eeaece1439536a71fefe58f2770007e
# Dataset Card for "api_guru" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
iohadrubin/api_guru
[ "region:us" ]
2023-11-16T04:14:50+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 524461650, "num_examples": 2065}], "download_size": 87622514, "dataset_size": 524461650}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-16T04:15:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "api_guru" More Information needed
[ "# Dataset Card for \"api_guru\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"api_guru\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"api_guru\"\n\nMore Information needed" ]
4a46788cdd8d1e4da37337aa28930c1574c97343
# Dataset Card for "truthfulqa_vicuna_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
brettbbb/truthfulqa_vicuna_train
[ "region:us" ]
2023-11-16T04:37:24+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1028417.0, "num_examples": 653}, {"name": "test", "num_bytes": 237336.0, "num_examples": 164}], "download_size": 255945, "dataset_size": 1265753.0}}
2023-11-16T05:45:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "truthfulqa_vicuna_train" More Information needed
[ "# Dataset Card for \"truthfulqa_vicuna_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"truthfulqa_vicuna_train\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"truthfulqa_vicuna_train\"\n\nMore Information needed" ]
8c228a370cca7fe71a5460e0e1a52e0233c73bd8
# Dataset Card for "ChatDoctor_chatGpt_7k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/ChatDoctor_chatGpt_7k
[ "region:us" ]
2023-11-16T05:48:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations_chatgpt", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18817244, "num_examples": 7321}], "download_size": 10222055, "dataset_size": 18817244}}
2023-11-16T05:48:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ChatDoctor_chatGpt_7k" More Information needed
[ "# Dataset Card for \"ChatDoctor_chatGpt_7k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ChatDoctor_chatGpt_7k\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ChatDoctor_chatGpt_7k\"\n\nMore Information needed" ]
c5e87ee70b01cd854a43c77ab34b6ab9a5af8b2b
# Dataset Card for "llama2-recipe-generation-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
skadewdl3/llama2-recipe-generation-mini
[ "region:us" ]
2023-11-16T05:50:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "ingredients", "dtype": "string"}, {"name": "directions", "dtype": "string"}, {"name": "link", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "NER", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 96851914, "num_examples": 50000}, {"name": "test", "num_bytes": 19341556, "num_examples": 10000}], "download_size": 56506535, "dataset_size": 116193470}}
2023-11-16T07:16:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "llama2-recipe-generation-mini" More Information needed
[ "# Dataset Card for \"llama2-recipe-generation-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"llama2-recipe-generation-mini\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"llama2-recipe-generation-mini\"\n\nMore Information needed" ]
6771c9e42b4c8b082595f2788d0b83b1c01e6334
# Dataset Card for "ChatDoctor_chatdoctor_7k" **数据集名称:** *lavita/ChatDoctor-iCliniq* **数据集原型来源:** *https://huggingface.co/datasets/lavita/ChatDoctor-iCliniq* **数据规模:** *7.32k* **数据生成:** *由llm生成* **数据领域:** *医患对话* [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/ChatDoctor_chatdoctor_7k
[ "region:us" ]
2023-11-16T05:50:33+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations_chatgpt", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14604774, "num_examples": 7321}], "download_size": 8420745, "dataset_size": 14604774}}
2023-11-20T07:53:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ChatDoctor_chatdoctor_7k" 数据集名称: *lavita/ChatDoctor-iCliniq* 数据集原型来源: *URL 数据规模: *7.32k* 数据生成: *由llm生成* 数据领域: *医患对话* More Information needed
[ "# Dataset Card for \"ChatDoctor_chatdoctor_7k\"\n数据集名称: \n*lavita/ChatDoctor-iCliniq*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*7.32k*\n\n数据生成: \n*由llm生成*\n\n数据领域: \n*医患对话*\n\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ChatDoctor_chatdoctor_7k\"\n数据集名称: \n*lavita/ChatDoctor-iCliniq*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*7.32k*\n\n数据生成: \n*由llm生成*\n\n数据领域: \n*医患对话*\n\n\n\nMore Information needed" ]
[ 6, 75 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ChatDoctor_chatdoctor_7k\"\n数据集名称: \n*lavita/ChatDoctor-iCliniq*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*7.32k*\n\n数据生成: \n*由llm生成*\n\n数据领域: \n*医患对话*\n\n\n\nMore Information needed" ]
36a6e937b4b3371288d3696dad65173e0e067395
# Dataset Card for "find_first_sent_train_100_eval_20_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_100_eval_20_baseline
[ "region:us" ]
2023-11-16T05:54:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 169972, "num_examples": 100}, {"name": "validation", "num_bytes": 35584, "num_examples": 20}], "download_size": 158682, "dataset_size": 205556}}
2023-11-16T05:54:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_100_eval_20_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_100_eval_20_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_100_eval_20_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_100_eval_20_baseline\"\n\nMore Information needed" ]
4f71fb5ea7a6348d14d4e7c0c32b8d99c59fd4d5
# Dataset Card for "find_first_sent_train_50_eval_20_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_50_eval_20_baseline
[ "region:us" ]
2023-11-16T05:54:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 82417, "num_examples": 50}, {"name": "validation", "num_bytes": 31654, "num_examples": 20}], "download_size": 98247, "dataset_size": 114071}}
2023-11-16T05:54:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_50_eval_20_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_50_eval_20_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_50_eval_20_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_50_eval_20_baseline\"\n\nMore Information needed" ]
f268d8bb6bcf432b9e664455e617a6dae4542696
# Dataset Card for "find_first_sent_train_100_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_100_eval_10_baseline
[ "region:us" ]
2023-11-16T05:55:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 169972, "num_examples": 100}, {"name": "validation", "num_bytes": 17771, "num_examples": 10}], "download_size": 0, "dataset_size": 187743}}
2023-11-16T05:57:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_100_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
ad4d88ce051fbd6d046b859e0e2ea08cbc459095
# Dataset Card for "find_first_sent_train_50_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_50_eval_10_baseline
[ "region:us" ]
2023-11-16T05:55:17+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 82417, "num_examples": 50}, {"name": "validation", "num_bytes": 15313, "num_examples": 10}], "download_size": 0, "dataset_size": 97730}}
2023-11-16T05:57:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_50_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
eafaa8636c43a9b4452132663d796d05d2af44eb
# Dataset Card for "find_first_sent_train_30_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_30_eval_10_baseline
[ "region:us" ]
2023-11-16T05:55:31+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 49979, "num_examples": 30}, {"name": "validation", "num_bytes": 18259, "num_examples": 10}], "download_size": 0, "dataset_size": 68238}}
2023-11-16T05:57:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_30_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
53507e0a0ea175e6706a194afd063e529506ab53
# Dataset Card for "find_last_sent_train_100_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_100_eval_10_baseline
[ "region:us" ]
2023-11-16T05:55:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 168946, "num_examples": 100}, {"name": "validation", "num_bytes": 17643, "num_examples": 10}], "download_size": 0, "dataset_size": 186589}}
2023-11-16T05:57:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_100_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_last_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
fee0eed63a1e52e60317877b5596ffc1ce448adf
# Dataset Card for "find_last_sent_train_50_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_50_eval_10_baseline
[ "region:us" ]
2023-11-16T05:56:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 80962, "num_examples": 50}, {"name": "validation", "num_bytes": 15247, "num_examples": 10}], "download_size": 0, "dataset_size": 96209}}
2023-11-16T05:57:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_50_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_last_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
09a73d4ef1487ca7c79131073486d23087e84c7c
# Dataset Card for "find_last_sent_train_30_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_30_eval_10_baseline
[ "region:us" ]
2023-11-16T05:56:15+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48938, "num_examples": 30}, {"name": "validation", "num_bytes": 18407, "num_examples": 10}], "download_size": 0, "dataset_size": 67345}}
2023-11-16T05:58:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_30_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_last_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
f97ad31ebc8a3abebca22227ad8b9cc3c3309eeb
# Dataset Card for "find_second_sent_train_100_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_100_eval_10_baseline
[ "region:us" ]
2023-11-16T05:56:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 168555, "num_examples": 100}, {"name": "validation", "num_bytes": 17349, "num_examples": 10}], "download_size": 0, "dataset_size": 185904}}
2023-11-16T05:58:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_100_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_second_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_100_eval_10_baseline\"\n\nMore Information needed" ]
0b8a29e044cb7361aba0159ebc08b178412345bb
# Dataset Card for "find_second_sent_train_50_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_50_eval_10_baseline
[ "region:us" ]
2023-11-16T05:56:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 80686, "num_examples": 50}, {"name": "validation", "num_bytes": 15357, "num_examples": 10}], "download_size": 0, "dataset_size": 96043}}
2023-11-16T05:58:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_50_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_second_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_50_eval_10_baseline\"\n\nMore Information needed" ]
2fdc1fadf0aac2b0fd06f49e0bb90100f13ad5c2
# Dataset Card for "find_second_sent_train_30_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_30_eval_10_baseline
[ "region:us" ]
2023-11-16T05:57:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48914, "num_examples": 30}, {"name": "validation", "num_bytes": 18561, "num_examples": 10}], "download_size": 0, "dataset_size": 67475}}
2023-11-16T05:58:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_30_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_second_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_30_eval_10_baseline\"\n\nMore Information needed" ]
f447137997daafc7af54771f12abee2780a9e97d
# Dataset Card for "find_first_sent_train_10_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_10_eval_10_baseline
[ "region:us" ]
2023-11-16T05:57:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17561, "num_examples": 10}, {"name": "validation", "num_bytes": 15422, "num_examples": 10}], "download_size": 48239, "dataset_size": 32983}}
2023-11-16T05:57:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_10_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_first_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
446ccfb8dc5076b35822b9a5dc692a7582e87d05
# Dataset Card for "find_last_sent_train_10_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_10_eval_10_baseline
[ "region:us" ]
2023-11-16T05:58:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17207, "num_examples": 10}, {"name": "validation", "num_bytes": 15272, "num_examples": 10}], "download_size": 47522, "dataset_size": 32479}}
2023-11-16T05:58:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_10_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_last_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
d585a6baa7e055c376280470a42ecb3ce37df232
# Dataset Card for "find_second_sent_train_10_eval_10_baseline" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_10_eval_10_baseline
[ "region:us" ]
2023-11-16T05:58:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17200, "num_examples": 10}, {"name": "validation", "num_bytes": 15289, "num_examples": 10}], "download_size": 47703, "dataset_size": 32489}}
2023-11-16T05:58:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_10_eval_10_baseline" More Information needed
[ "# Dataset Card for \"find_second_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_10_eval_10_baseline\"\n\nMore Information needed" ]
85427bef54e9db333f2fbf47154fe4771b1162c4
# Dataset Card for "image-cls" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chiragtubakad/image-cls
[ "region:us" ]
2023-11-16T06:00:51+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "bar", "1": "dot", "2": "line", "3": "pie"}}}}], "splits": [{"name": "train", "num_bytes": 196060933.544, "num_examples": 4184}, {"name": "test", "num_bytes": 50072332.492, "num_examples": 1046}], "download_size": 215148798, "dataset_size": 246133266.036}}
2023-11-16T06:06:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "image-cls" More Information needed
[ "# Dataset Card for \"image-cls\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"image-cls\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"image-cls\"\n\nMore Information needed" ]
eb4c7e07f3a15f5ed885ce1051a4858c748ee1ae
# OshiChats v2 OshiChats v2 is a dataset of 56 million high-quality English chat messages collected from various [VTuber](https://en.wikipedia.org/wiki/VTuber) live streams before 18th November 2023. ## Usage ```py from datasets import load_dataset chats_dataset = load_dataset('pykeio/oshichats-v2', split='train') print(chats_dataset[0]) ``` ## Samples ```json { "liver": "Millie Parfait", "flags": 16782594, "stream": { "id": "yt=aX-D4GDi14s", "topic": "asmr" }, "author": "Brandermau", "message": "Thank you for the blessed week <|liver:text=TWlsbGll|>!", "donation": true, "score": 21.930078506469727, "languages": [0.7040359377861023,0.1367727518081665,0.07201824337244034,0.05604061856865883,0.023007752373814583,0.008124674670398235,0.0,0.0,0.0,0.0], "analysis": [0.01581309176981449,0.0001037662077578716,0.0016704736044630408,0.00014343550719786435,0.000602249929215759,0.00019911097479052842], "delta_time": 365.5880126953125 } { "liver": "Finana Ryugu", "flags": 16781826, "stream": { "id": "yt=t82VaSIfAIA", "topic": "Final_Fantasy_Online" }, "author": "Yuki", "message": "Crossing fingers for her going the path I started with too so there are no extra spoilers", "donation": false, "score": 18.640567779541016, "languages": [0.9631453156471252,0.021243400871753693,0.01243548933416605,0.0014567276230081916,0.0011399302165955305,0.0005791507428511977,0.0,0.0,0.0,0.0], "analysis": [0.030425170436501503,0.0001254125963896513,0.0015225252136588097,0.0001571120519656688,0.0011026122374460101,0.0005010333843529224], "delta_time": 140.35299682617188 } ``` ## Data fields > 💡 Click on a field to display more information. <ul> <li> <details> <summary><code>liver</code></summary> Name of the talent hosting the live stream. </details> </li> <li> <details> <summary><code>flags</code></summary> Flags for the hosting talent. The 24th bit indicates the talent uses she/her pronouns. If not set, the talent uses he/him pronouns. The last four bits (<code>x & 0b1111</code>) indicate the liver's organization affiliation. Current affiliation values are: <ul> <li><code>0b0000</code>: Indie (no organization affiliation)</li> <li><code>0b0001</code>: Hololive</li> <li><code>0b0010</code>: Nijisanji</li> <li><code>0b0011</code>: idol Corp</li> <li><code>0b0100</code>: Phase Connect</li> </ul> </details> </li> <li> <details> <summary><code>stream</code></summary> Information about the livestream. Contains two fields, <code>id</code> and <code>topic</code>. <ul> <li> <code>topic</code> is the human-annotated topic of the live stream (i.e. <code>"talk"</code>, <code>"Minecraft"</code>, <code>"Singing"</code>), or <code>null</code> if a single topic could not be determined. </li> <li> <code>id</code> is the ID of the live stream, prefixed with either <code>yt=</code> or <code>tw=</code> to indicate a YouTube or Twitch stream respectively. </li> </ul> </details> </li> <li> <details> <summary><code>author</code></summary> Display name of the author of the chat message. </details> </li> <li> <details> <summary><code>message</code></summary> Contents of the chat message. Certain message contents are replaced with tags for usage in downstream tasks, see the section below on message tags for more information. </details> </li> <li> <details> <summary><code>donation</code></summary> Whether or not this message is a superchat or donation. </details> </li> <li> <details> <summary><code>score</code></summary> Neural quality score. Messages that engage in constructive conversation with the talent are rewarded with a higher score. 
</details> </li> <li> <details> <summary><code>languages</code></summary> Language detection score. In order, the languages represented in this field are: English, Japanese, Indonesian, Chinese, Korean, Tagalog, Spanish, Russian, French, German. </details> </li> <li> <details> <summary><code>analysis</code></summary> Internal, not intended to be used. </details> </li> <li> <details> <summary><code>delta_time</code></summary> The time the message was sent, represented as an offset since the beginning of the stream, in seconds. </details> </li> </ul> ## Message tags <ul> <li> <details> <summary><code>&lt;|liver:text={text}|&gt;</code></summary> References the talent by their name or one of their nicknames. <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|fans:text={text}|&gt;</code></summary> References the fanbase of the talent. Some talents will give their fanbase a name, e.g., Petra Gurin refers to her viewers as "pentomos". <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary> References another talent by their name or one of their nicknames. The talent is confirmed via human review to be present in the stream and actively collaborating with the stream host. <code>other</code> is the full canonical name of the referenced talent. <code>text</code> is the original text, encoded as base64. <code>aff</code> is the affiliation of the other talent. This has two values: <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). </details> </li> <li> <details> <summary><code>&lt;|collaborator-fans:text={text}|&gt;</code></summary> References the fanbase of one of the collaborators present in the stream. <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|maybe-collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary> References another talent by their name or one of their nicknames. The talent <b>may or may not</b> be present in the stream. <code>other</code> is the full canonical name of the referenced talent. <code>text</code> is the original text, encoded as base64. <code>aff</code> is the affiliation of the other talent. This has two values: <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). If the affiliation is <code>none</code>, it is safe to regard this tag as a false positive in most cases. </details> </li> </ul> ## License Licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/); you must give attribution to pyke.io in any derivatives of this dataset, including models trained using its data.
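The `flags` bitfield and the base64 payloads inside message tags are cheap to decode. A minimal sketch follows; the function names are hypothetical, and the exact pronoun bit position is inferred from the sample records above rather than stated authoritatively by the card:

```python
import base64
import re

# Affiliation values as documented in the `flags` field description above.
AFFILIATIONS = {
    0b0000: "Indie",
    0b0001: "Hololive",
    0b0010: "Nijisanji",
    0b0011: "idol Corp",
    0b0100: "Phase Connect",
}

def decode_flags(flags: int) -> dict:
    """Decode the documented parts of the `flags` bitfield."""
    return {
        # The sample records suggest the pronoun bit is bit 24 (zero-indexed):
        # both she/her talents in the samples have flags >= 2**24.
        "pronouns": "she/her" if flags & (1 << 24) else "he/him",
        "affiliation": AFFILIATIONS.get(flags & 0b1111, "unknown"),
    }

def strip_tags(message: str) -> str:
    """Replace <|...:text={base64}...|> tags with their decoded original text."""
    pattern = re.compile(r"<\|[^|]*?text=([A-Za-z0-9+/=]+)[^|]*\|>")
    return pattern.sub(lambda m: base64.b64decode(m.group(1)).decode("utf-8"), message)

print(decode_flags(16782594))
# -> {'pronouns': 'she/her', 'affiliation': 'Nijisanji'}  (matches the first sample)
print(strip_tags("Thank you for the blessed week <|liver:text=TWlsbGll|>!"))
# -> "Thank you for the blessed week Millie!"
```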
pykeio/oshichats-v2
[ "task_categories:text-classification", "task_categories:conversational", "task_categories:text-generation", "task_categories:token-classification", "annotations_creators:crowdsourced", "language_creators:found", "size_categories:10M<n<100M", "language:en", "license:cc-by-4.0", "livestream", "stream", "chat", "messages", "vtuber", "vtubers", "twitch", "youtube", "region:us" ]
2023-11-16T06:08:32+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["found"], "language": ["en"], "license": "cc-by-4.0", "size_categories": ["10M<n<100M"], "task_categories": ["text-classification", "conversational", "text-generation", "token-classification"], "pretty_name": "OshiChats v2", "tags": ["livestream", "stream", "chat", "messages", "vtuber", "vtubers", "twitch", "youtube"]}
2023-11-27T04:41:05+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-conversational #task_categories-text-generation #task_categories-token-classification #annotations_creators-crowdsourced #language_creators-found #size_categories-10M<n<100M #language-English #license-cc-by-4.0 #livestream #stream #chat #messages #vtuber #vtubers #twitch #youtube #region-us
# OshiChats v2 OshiChats v2 is a dataset of 56 million high-quality English chat messages collected from various VTuber live streams before 18th November 2023. ## Usage ## Samples ## Data fields > Click on a field to display more information. <ul> <li> <details> <summary><code>liver</code></summary> Name of the talent hosting the live stream. </details> </li> <li> <details> <summary><code>flags</code></summary> Flags for the hosting talent. The 24th bit indicates the talent uses she/her pronouns. If not set, the talent uses he/him pronouns. The last four bits (<code>x & 0b1111</code>) indicate the liver's organization affiliation. Current affiliation values are: <ul> <li><code>0b0000</code>: Indie (no organization affiliation)</li> <li><code>0b0001</code>: Hololive</li> <li><code>0b0010</code>: Nijisanji</li> <li><code>0b0011</code>: idol Corp</li> <li><code>0b0100</code>: Phase Connect</li> </ul> </details> </li> <li> <details> <summary><code>stream</code></summary> Information about the livestream. Contains two fields, <code>id</code> and <code>topic</code>. <ul> <li> <code>topic</code> is the human-annotated topic of the live stream (i.e. <code>"talk"</code>, <code>"Minecraft"</code>, <code>"Singing"</code>), or <code>null</code> if a single topic could not be determined. </li> <li> <code>id</code> is the ID of the live stream, prefixed with either <code>yt=</code> or <code>tw=</code> to indicate a YouTube or Twitch stream respectively. </li> </ul> </details> </li> <li> <details> <summary><code>author</code></summary> Display name of the author of the chat message. </details> </li> <li> <details> <summary><code>message</code></summary> Contents of the chat message. Certain message contents are replaced with tags for usage in downstream tasks, see the section below on message tags for more information. </details> </li> <li> <details> <summary><code>donation</code></summary> Whether or not this message is a superchat or donation. </details> </li> <li> <details> <summary><code>score</code></summary> Neural quality score. Messages that engage in constructive conversation with the talent are rewarded with a higher score. </details> </li> <li> <details> <summary><code>languages</code></summary> Language detection score. In order, the languages represented in this field are: English, Japanese, Indonesian, Chinese, Korean, Tagalog, Spanish, Russian, French, German. </details> </li> <li> <details> <summary><code>analysis</code></summary> Internal, not intended to be used. </details> </li> <li> <details> <summary><code>delta_time</code></summary> The time the message was sent, represented as an offset since the beginning of the stream, in seconds. </details> </li> </ul> ## Message tags <ul> <li> <details> <summary><code>&lt;|liver:text={text}|&gt;</code></summary> References the talent by their name or one of their nicknames. <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|fans:text={text}|&gt;</code></summary> References the fanbase of the talent. Some talents will give their fanbase a name, i.e. Petra Gurin refers to her viewers as "pentomos". <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary> References another talent by their name or one of their nicknames. The talent is confirmed via human review to be present in the stream and actively collaborating with the stream host. 
<code>other</code> is the full canonical name of the referenced talent. <code>text</code> is the original text, encoded as base64. <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). </details> </li> <li> <details> <summary><code>&lt;|collaborator-fans:text={text}|&gt;</code></summary> References the fanbase of one of the collaborators present in the stream. <code>text</code> is the original text, encoded as base64. </details> </li> <li> <details> <summary><code>&lt;|maybe-collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary> References another talent by their name or one of their nicknames. The talent <b>may or may not</b> be present in the stream. <code>other</code> is the full canonical name of the referenced talent. <code>text</code> is the original text, encoded as base64. <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). If the affiliation is <code>none</code>, it is safe to regard this tag as a false positive in most cases. </details> </li> </ul> ## License Licensed under CC BY 4.0; you must give attribution to URL in any derivatives of this dataset, including models trained using its data.
[ "# OshiChats v2\nOshiChats v2 is a dataset of 56 million high-quality English chat messages collected from various VTuber live streams before 18th November 2023.", "## Usage", "## Samples", "## Data fields\n> Click on a field to display more information.\n\n<ul>\n <li>\n <details>\n <summary><code>liver</code></summary>\n Name of the talent hosting the live stream.\n </details>\n </li>\n <li>\n <details>\n <summary><code>flags</code></summary>\n Flags for the hosting talent.\n The 24th bit indicates the talent uses she/her pronouns. If not set, the talent uses he/him pronouns.\n The last four bits (<code>x & 0b1111</code>) indicate the liver's organization affiliation. Current affiliation values are:\n <ul>\n <li><code>0b0000</code>: Indie (no organization affiliation)</li>\n <li><code>0b0001</code>: Hololive</li>\n <li><code>0b0010</code>: Nijisanji</li>\n <li><code>0b0011</code>: idol Corp</li>\n <li><code>0b0100</code>: Phase Connect</li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>stream</code></summary>\n Information about the livestream. Contains two fields, <code>id</code> and <code>topic</code>.\n <ul>\n <li>\n <code>topic</code> is the human-annotated topic of the live stream (i.e. <code>\"talk\"</code>, <code>\"Minecraft\"</code>, <code>\"Singing\"</code>), or <code>null</code> if a single topic could not be determined.\n </li>\n <li>\n <code>id</code> is the ID of the live stream, prefixed with either <code>yt=</code> or <code>tw=</code> to indicate a YouTube or Twitch stream respectively.\n </li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>author</code></summary>\n Display name of the author of the chat message.\n </details>\n </li>\n <li>\n <details>\n <summary><code>message</code></summary>\n Contents of the chat message.\n Certain message contents are replaced with tags for usage in downstream tasks, see the section below on message tags for more information.\n </details>\n </li>\n <li>\n <details>\n <summary><code>donation</code></summary>\n Whether or not this message is a superchat or donation.\n </details>\n </li>\n <li>\n <details>\n <summary><code>score</code></summary>\n Neural quality score. Messages that engage in constructive conversation with the talent are rewarded with a higher score.\n </details>\n </li>\n <li>\n <details>\n <summary><code>languages</code></summary>\n Language detection score. In order, the languages represented in this field are: English, Japanese, Indonesian, Chinese, Korean, Tagalog, Spanish, Russian, French, German.\n </details>\n </li>\n <li>\n <details>\n <summary><code>analysis</code></summary>\n Internal, not intended to be used.\n </details>\n </li>\n <li>\n <details>\n <summary><code>delta_time</code></summary>\n The time the message was sent, represented as an offset since the beginning of the stream, in seconds.\n </details>\n </li>\n</ul>", "## Message tags\n<ul>\n <li>\n <details>\n <summary><code>&lt;|liver:text={text}|&gt;</code></summary>\n References the talent by their name or one of their nicknames.\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|fans:text={text}|&gt;</code></summary>\n References the fanbase of the talent.\n Some talents will give their fanbase a name, i.e. 
Petra Gurin refers to her viewers as \"pentomos\".\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary>\n References another talent by their name or one of their nicknames.\n The talent is confirmed via human review to be present in the stream and actively collaborating with the stream host.\n <code>other</code> is the full canonical name of the referenced talent.\n <code>text</code> is the original text, encoded as base64.\n <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization).\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|collaborator-fans:text={text}|&gt;</code></summary>\n References the fanbase of one of the collaborators present in the stream.\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|maybe-collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary>\n References another talent by their name or one of their nicknames.\n The talent <b>may or may not</b> be present in the stream.\n <code>other</code> is the full canonical name of the referenced talent.\n <code>text</code> is the original text, encoded as base64.\n <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). If the affiliation is <code>none</code>, it is safe to regard this tag as a false positive in most cases.\n </details>\n </li>\n</ul>", "## License\nLicensed under CC BY 4.0; you must give attribution to URL in any derivatives of this dataset, including models trained using its data." ]
[ "TAGS\n#task_categories-text-classification #task_categories-conversational #task_categories-text-generation #task_categories-token-classification #annotations_creators-crowdsourced #language_creators-found #size_categories-10M<n<100M #language-English #license-cc-by-4.0 #livestream #stream #chat #messages #vtuber #vtubers #twitch #youtube #region-us \n", "# OshiChats v2\nOshiChats v2 is a dataset of 56 million high-quality English chat messages collected from various VTuber live streams before 18th November 2023.", "## Usage", "## Samples", "## Data fields\n> Click on a field to display more information.\n\n<ul>\n <li>\n <details>\n <summary><code>liver</code></summary>\n Name of the talent hosting the live stream.\n </details>\n </li>\n <li>\n <details>\n <summary><code>flags</code></summary>\n Flags for the hosting talent.\n The 24th bit indicates the talent uses she/her pronouns. If not set, the talent uses he/him pronouns.\n The last four bits (<code>x & 0b1111</code>) indicate the liver's organization affiliation. Current affiliation values are:\n <ul>\n <li><code>0b0000</code>: Indie (no organization affiliation)</li>\n <li><code>0b0001</code>: Hololive</li>\n <li><code>0b0010</code>: Nijisanji</li>\n <li><code>0b0011</code>: idol Corp</li>\n <li><code>0b0100</code>: Phase Connect</li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>stream</code></summary>\n Information about the livestream. Contains two fields, <code>id</code> and <code>topic</code>.\n <ul>\n <li>\n <code>topic</code> is the human-annotated topic of the live stream (i.e. <code>\"talk\"</code>, <code>\"Minecraft\"</code>, <code>\"Singing\"</code>), or <code>null</code> if a single topic could not be determined.\n </li>\n <li>\n <code>id</code> is the ID of the live stream, prefixed with either <code>yt=</code> or <code>tw=</code> to indicate a YouTube or Twitch stream respectively.\n </li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>author</code></summary>\n Display name of the author of the chat message.\n </details>\n </li>\n <li>\n <details>\n <summary><code>message</code></summary>\n Contents of the chat message.\n Certain message contents are replaced with tags for usage in downstream tasks, see the section below on message tags for more information.\n </details>\n </li>\n <li>\n <details>\n <summary><code>donation</code></summary>\n Whether or not this message is a superchat or donation.\n </details>\n </li>\n <li>\n <details>\n <summary><code>score</code></summary>\n Neural quality score. Messages that engage in constructive conversation with the talent are rewarded with a higher score.\n </details>\n </li>\n <li>\n <details>\n <summary><code>languages</code></summary>\n Language detection score. 
In order, the languages represented in this field are: English, Japanese, Indonesian, Chinese, Korean, Tagalog, Spanish, Russian, French, German.\n </details>\n </li>\n <li>\n <details>\n <summary><code>analysis</code></summary>\n Internal, not intended to be used.\n </details>\n </li>\n <li>\n <details>\n <summary><code>delta_time</code></summary>\n The time the message was sent, represented as an offset since the beginning of the stream, in seconds.\n </details>\n </li>\n</ul>", "## Message tags\n<ul>\n <li>\n <details>\n <summary><code>&lt;|liver:text={text}|&gt;</code></summary>\n References the talent by their name or one of their nicknames.\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|fans:text={text}|&gt;</code></summary>\n References the fanbase of the talent.\n Some talents will give their fanbase a name, i.e. Petra Gurin refers to her viewers as \"pentomos\".\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary>\n References another talent by their name or one of their nicknames.\n The talent is confirmed via human review to be present in the stream and actively collaborating with the stream host.\n <code>other</code> is the full canonical name of the referenced talent.\n <code>text</code> is the original text, encoded as base64.\n <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization).\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|collaborator-fans:text={text}|&gt;</code></summary>\n References the fanbase of one of the collaborators present in the stream.\n <code>text</code> is the original text, encoded as base64.\n </details>\n </li>\n <li>\n <details>\n <summary><code>&lt;|maybe-collaborator:liver={other}:text={text}:affiliation={aff}|&gt;</code></summary>\n References another talent by their name or one of their nicknames.\n The talent <b>may or may not</b> be present in the stream.\n <code>other</code> is the full canonical name of the referenced talent.\n <code>text</code> is the original text, encoded as base64.\n <code>aff</code> is the affiliation of the other talent. This has two values - <code>org</code> (meaning the referenced talent is affiliated with the same organization as the stream host) and <code>none</code> (meaning the referenced talent is from another organization). If the affiliation is <code>none</code>, it is safe to regard this tag as a false positive in most cases.\n </details>\n </li>\n</ul>", "## License\nLicensed under CC BY 4.0; you must give attribution to URL in any derivatives of this dataset, including models trained using its data." ]
[ 119, 42, 3, 3, 835, 710, 33 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-conversational #task_categories-text-generation #task_categories-token-classification #annotations_creators-crowdsourced #language_creators-found #size_categories-10M<n<100M #language-English #license-cc-by-4.0 #livestream #stream #chat #messages #vtuber #vtubers #twitch #youtube #region-us \n# OshiChats v2\nOshiChats v2 is a dataset of 56 million high-quality English chat messages collected from various VTuber live streams before 18th November 2023.## Usage## Samples", "passage: ## Data fields\n> Click on a field to display more information.\n\n<ul>\n <li>\n <details>\n <summary><code>liver</code></summary>\n Name of the talent hosting the live stream.\n </details>\n </li>\n <li>\n <details>\n <summary><code>flags</code></summary>\n Flags for the hosting talent.\n The 24th bit indicates the talent uses she/her pronouns. If not set, the talent uses he/him pronouns.\n The last four bits (<code>x & 0b1111</code>) indicate the liver's organization affiliation. Current affiliation values are:\n <ul>\n <li><code>0b0000</code>: Indie (no organization affiliation)</li>\n <li><code>0b0001</code>: Hololive</li>\n <li><code>0b0010</code>: Nijisanji</li>\n <li><code>0b0011</code>: idol Corp</li>\n <li><code>0b0100</code>: Phase Connect</li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>stream</code></summary>\n Information about the livestream. Contains two fields, <code>id</code> and <code>topic</code>.\n <ul>\n <li>\n <code>topic</code> is the human-annotated topic of the live stream (i.e. <code>\"talk\"</code>, <code>\"Minecraft\"</code>, <code>\"Singing\"</code>), or <code>null</code> if a single topic could not be determined.\n </li>\n <li>\n <code>id</code> is the ID of the live stream, prefixed with either <code>yt=</code> or <code>tw=</code> to indicate a YouTube or Twitch stream respectively.\n </li>\n </ul>\n </details>\n </li>\n <li>\n <details>\n <summary><code>author</code></summary>\n Display name of the author of the chat message.\n </details>\n </li>\n <li>\n <details>\n <summary><code>message</code></summary>\n Contents of the chat message.\n Certain message contents are replaced with tags for usage in downstream tasks, see the section below on message tags for more information.\n </details>\n </li>\n <li>\n <details>\n <summary><code>donation</code></summary>\n Whether or not this message is a superchat or donation.\n </details>\n </li>\n <li>\n <details>\n <summary><code>score</code></summary>\n Neural quality score. Messages that engage in constructive conversation with the talent are rewarded with a higher score.\n </details>\n </li>\n <li>\n <details>\n <summary><code>languages</code></summary>\n Language detection score. In order, the languages represented in this field are: English, Japanese, Indonesian, Chinese, Korean, Tagalog, Spanish, Russian, French, German.\n </details>\n </li>\n <li>\n <details>\n <summary><code>analysis</code></summary>\n Internal, not intended to be used.\n </details>\n </li>\n <li>\n <details>\n <summary><code>delta_time</code></summary>\n The time the message was sent, represented as an offset since the beginning of the stream, in seconds.\n </details>\n </li>\n</ul>" ]
1f0cf4f85a47dd5f33371f549ede1bb830fc4a6d
# Dataset Card for "radiology-samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cp500/radiology-samples
[ "region:us" ]
2023-11-16T06:42:28+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 105035647, "num_examples": 135466}, {"name": "test", "num_bytes": 26470297, "num_examples": 33869}], "download_size": 54294813, "dataset_size": 131505944}}
2023-11-16T06:42:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "radiology-samples" More Information needed
[ "# Dataset Card for \"radiology-samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"radiology-samples\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"radiology-samples\"\n\nMore Information needed" ]
7c005a0d285ab2a9a65308ff2503a26f22531827
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> An image dataset for video interpolation focusing on video games ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed]
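Until the card is filled in, the dataset can still be pulled with the standard `datasets` API. A minimal sketch: the card metadata only declares an `image` feature and no splits, so the available splits are inspected rather than assumed:

```python
from datasets import load_dataset

# Load every declared split of the repository.
ds = load_dataset("animadot/gaming-hfr")
print(ds)  # shows the available splits and row counts

first_split = next(iter(ds))
print(ds[first_split][0]["image"])  # a PIL image, per the `image` feature dtype
```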
animadot/gaming-hfr
[ "license:apache-2.0", "region:us" ]
2023-11-16T06:42:42+00:00
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "image", "dtype": "image"}]}}
2023-11-16T10:02:04+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
# Dataset Card for Dataset Name An image dataset for video interpolation focusing on video games ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional]
[ "# Dataset Card for Dataset Name\n\n\n\nAn image dataset for video interpolation focusing on video games", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nAn image dataset for video interpolation focusing on video games", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]" ]
[ 14, 22, 4, 40, 3, 4, 9, 6, 5, 7, 4, 7, 10, 10, 46, 8 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# Dataset Card for Dataset Name\n\n\n\nAn image dataset for video interpolation focusing on video games## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]" ]
beb44ef0ef774ef988a2434775683f89a467ffef
# Dataset Card for "mri-samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cp500/mri-samples
[ "region:us" ]
2023-11-16T06:43:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 46044813, "num_examples": 30504}, {"name": "test", "num_bytes": 5248961, "num_examples": 3390}], "download_size": 23104510, "dataset_size": 51293774}}
2023-11-16T06:43:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mri-samples" More Information needed
[ "# Dataset Card for \"mri-samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mri-samples\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mri-samples\"\n\nMore Information needed" ]
ee60a1e8e3f6c710d076fd634f40064a1e4d8dc9
# Dataset Card for "ultrasound-samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cp500/ultrasound-samples
[ "region:us" ]
2023-11-16T06:44:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5626157, "num_examples": 6433}], "download_size": 1914383, "dataset_size": 5626157}}
2023-11-16T06:44:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ultrasound-samples" More Information needed
[ "# Dataset Card for \"ultrasound-samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ultrasound-samples\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ultrasound-samples\"\n\nMore Information needed" ]
02464c7c29ffa87bd729d2b7e616a9c84052b8f3
# Dataset Card for "CT-samples" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cp500/CT-samples
[ "region:us" ]
2023-11-16T06:47:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 748569212, "num_examples": 582951}], "download_size": 302697027, "dataset_size": 748569212}}
2023-11-16T06:48:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CT-samples" More Information needed
[ "# Dataset Card for \"CT-samples\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CT-samples\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"CT-samples\"\n\nMore Information needed" ]
0b9daf4fb3568685ed05e6c7dfb2a5483f6d8a48
# Dataset Card for "elpv-augmented" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mjphayes/elpv-augmented
[ "region:us" ]
2023-11-16T06:48:44+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "class", "dtype": "int64"}, {"name": "type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 138172974.72, "num_examples": 4416}, {"name": "validation", "num_bytes": 13534024.0, "num_examples": 394}, {"name": "test", "num_bytes": 22354586.0, "num_examples": 654}], "download_size": 191567217, "dataset_size": 174061584.72}}
2023-11-17T04:01:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "elpv-augmented" More Information needed
[ "# Dataset Card for \"elpv-augmented\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"elpv-augmented\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"elpv-augmented\"\n\nMore Information needed" ]
64ee9fb44aedabd396dd288bcfd4f537296c9983
# Dataset Card for "multi-modal" This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla) or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla Argilla supports Markdown within its text fields. This means you can easily add formatting like **bold** and *italic* text, [links](https://www.google.com), and even insert HTML elements like images, audios, videos, and iframes. A multi-modal dataset can be used to create a dataset with text and different types of media content. It can be useful for different tasks, such as image captioning, video captioning, audio captioning, and so on. So, this is a multi-modal dataset example that uses three different datasets from Hugging Face: * **Video**: We use an action recognition dataset, the [ucf101-subset](https://huggingface.co/datasets/sayakpaul/ucf101-subset) from the [UCF101](https://www.crcv.ucf.edu/data/UCF101.php). This dataset contains realistic action videos from YouTube, classified in 101 actions. * **Audio**: We use an audio classification dataset, the [ccmusic-database/bel_folk](https://huggingface.co/datasets/ccmusic-database/bel_folk). This dataset contains 1 minute audio clips of Chinese folk music, and the genre of the music. * **Image**: We use an image classification dataset, the [zishuod/pokemon-icons](https://huggingface.co/datasets/zishuod/pokemon-icons). This dataset contains images of Pokemon that need to be classified. ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("argilla/multi-modal") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("argilla/multi-modal") ``` ### Supported Tasks - Multi-modal classification - Multi-modal transcription ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, and **guidelines**. The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | text | Text | text | True | False | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. 
| Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | label | Label | label_selection | True | N/A | ['World', 'Sports', 'Business', 'Sci/Tech'] | The **suggestions** are human- or machine-generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named by appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. Accordingly, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". **✨ NEW** The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. The **guidelines** are optional as well; they are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. #### Data in "multi-modal" Dataset * **Fields:** These are the records, each of which is a video, audio, or image file encoded in base64. * **text** is of type `text`. * **Questions:** These are the questions that should be annotated. * **TextQuestion** is a feature to describe the content in detail. * **RatingQuestion** will allow us to rate the content's quality effectively. * **LabelQuestion** is for tagging the content with the most suitable age group. * **Metadata:** Three metadata properties are added to streamline content management. * **groups** is to identify the assigned annotator group. * **media** will specify the media source. * **source-dataset** will highlight the source dataset of the content in each record. ### Data Splits The dataset contains a single split, which is `train`.
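As a concrete illustration of how a media record ends up in the Markdown-enabled `text` field, here is a rough sketch that encodes an image as a base64 data URI and appends it to the dataset. The `FeedbackRecord`/`add_records` calls follow Argilla's 1.x Python API as of this writing; the file path and metadata values are made up for the example:

```python
import base64
import argilla as rg

# Encode a local image as a base64 data URI so it renders inside the
# Markdown-enabled `text` field. The path below is illustrative only.
with open("pokemon_icon.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

record = rg.FeedbackRecord(
    fields={"text": f'<img src="data:image/png;base64,{encoded}" width="300">'},
    metadata={
        "groups": "group-1",                       # assigned annotator group
        "media": "image",                          # media source
        "source-dataset": "zishuod/pokemon-icons"  # source dataset of the content
    },
)

# Load the existing dataset locally and append the new record.
ds = rg.FeedbackDataset.from_huggingface("argilla/multi-modal")
ds.add_records([record])
```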
argilla/multi-modal
[ "region:us" ]
2023-11-16T07:20:09+00:00
{"dataset_info": {"features": [{"name": "content", "dtype": "string", "id": "field"}, {"name": "description", "list": [{"name": "user_id", "dtype": "string", "id": "question"}, {"name": "value", "dtype": "string", "id": "suggestion"}, {"name": "status", "dtype": "string", "id": "question"}]}, {"name": "description-suggestion", "dtype": "string", "id": "suggestion"}, {"name": "description-suggestion-metadata", "struct": [{"name": "type", "dtype": "string", "id": "suggestion-metadata"}, {"name": "score", "dtype": "float32", "id": "suggestion-metadata"}, {"name": "agent", "dtype": "string", "id": "suggestion-metadata"}]}, {"name": "quality", "list": [{"name": "user_id", "dtype": "string", "id": "question"}, {"name": "value", "dtype": "int32", "id": "suggestion"}, {"name": "status", "dtype": "string", "id": "question"}]}, {"name": "quality-suggestion", "dtype": "int32", "id": "suggestion"}, {"name": "quality-suggestion-metadata", "struct": [{"name": "type", "dtype": "string", "id": "suggestion-metadata"}, {"name": "score", "dtype": "float32", "id": "suggestion-metadata"}, {"name": "agent", "dtype": "string", "id": "suggestion-metadata"}]}, {"name": "age_group", "list": [{"name": "user_id", "dtype": "string", "id": "question"}, {"name": "value", "dtype": "string", "id": "suggestion"}, {"name": "status", "dtype": "string", "id": "question"}]}, {"name": "age_group-suggestion", "dtype": "string", "id": "suggestion"}, {"name": "age_group-suggestion-metadata", "struct": [{"name": "type", "dtype": "string", "id": "suggestion-metadata"}, {"name": "score", "dtype": "float32", "id": "suggestion-metadata"}, {"name": "agent", "dtype": "string", "id": "suggestion-metadata"}]}, {"name": "external_id", "dtype": "string", "id": "external_id"}, {"name": "metadata", "dtype": "string", "id": "metadata"}], "splits": [{"name": "train", "num_bytes": 76240752, "num_examples": 60}], "download_size": 0, "dataset_size": 76240752}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-20T10:19:18+00:00
[]
[]
TAGS #region-us
Dataset Card for "multi-modal" ============================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL Argilla supports Markdown within its text fields. This means you can easily add formatting like bold and *italic* text, links, and even insert HTML elements like images, audios, videos, and iframes. A multi-modal dataset can be used to create a dataset with text and different types of media content. It can be useful for different tasks, such as image captioning, video captioning, audio captioning, and so on. So, this is a multi-modal dataset example that uses three different datasets from Hugging Face: * Video: We use an action recognition dataset, the ucf101-subset from the UCF101. This dataset contains realistic action videos from YouTube, classified in 101 actions. * Audio: We use an audio classification dataset, the ccmusic-database/bel\_folk. This dataset contains 1 minute audio clips of Chinese folk music, and the genre of the music. * Image: We use an image classification dataset, the zishuod/pokemon-icons. This dataset contains images of Pokemon that need to be classified. ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks * Multi-modal classification * Multi-modal transcription Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". NEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. 
For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. #### Data in "multi-modal" Dataset * Fields: These are the records, each of them is a video, audio or image file encoded in base64. + text is of type 'text'. * Questions: These are the questions that should be annotated. + TextQuestion is a feature to describe the content in detail. + RatingQuestion will allow us to rate the content's quality effectively. + LabelQuestion is for tagging the content with the most suitable age group. * Metadata: Three metadata properties are added to streamline content management. + groups is to identify the assigned annotator group. + media will specify the media source. + source-dataset will highlight the source dataset of the content in each record. ### Data Splits The dataset contains a single split, which is 'train'.
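The loading snippets referenced in the "Load with Argilla" and "Load with 'datasets'" sections above did not survive this export. Below is a minimal sketch of both, assuming the standard Argilla 1.x client API; the repository id `argilla/multi-modal` and the connection values are placeholders, not confirmed by this card:

```python
import argilla as rg
from datasets import load_dataset

# Connect to a running Argilla instance (URL and API key are placeholders).
rg.init(api_url="http://localhost:6900", api_key="owner.apikey")

# Load with Argilla: restores the records together with the dataset
# configuration (fields, questions, metadata properties, guidelines).
feedback_dataset = rg.FeedbackDataset.from_huggingface("argilla/multi-modal")

# Load with `datasets`: returns only the raw records as a regular dataset.
records = load_dataset("argilla/multi-modal")
```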
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks\n\n\n* Multi-modal classification\n* Multi-modal transcription\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "#### Data in \"multi-modal\" Dataset\n\n\n* Fields: These are the records, each of them is a video, audio or image file encoded in base64.\n\n\n\t+ text is of type 'text'.\n* Questions: These are the questions that should be annotated.\n\n\n\t+ TextQuestion is a feature to describe the content in detail.\n\t+ RatingQuestion will allow us to rate the content's quality effectively.\n\t+ LabelQuestion is for tagging the content with the most suitable age group.\n* Metadata: Three metadata properties are added to streamline content management.\n\n\n\t+ groups is to identify the assigned annotator group.\n\t+ media will specify the media source.\n\t+ source-dataset will highlight the source dataset of the content in each record.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'." ]
[ "TAGS\n#region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks\n\n\n* Multi-modal classification\n* Multi-modal transcription\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "#### Data in \"multi-modal\" Dataset\n\n\n* Fields: These are the records, each of them is a video, audio or image file encoded in base64.\n\n\n\t+ text is of type 'text'.\n* Questions: These are the questions that should be annotated.\n\n\n\t+ TextQuestion is a feature to describe the content in detail.\n\t+ RatingQuestion will allow us to rate the content's quality effectively.\n\t+ LabelQuestion is for tagging the content with the most suitable age group.\n* Metadata: Three metadata properties are added to streamline content management.\n\n\n\t+ groups is to identify the assigned annotator group.\n\t+ media will specify the media source.\n\t+ source-dataset will highlight the source dataset of the content in each record.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'." ]
[ 6, 133, 40, 53, 27, 402, 166, 21 ]
[ "passage: TAGS\n#region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks\n\n\n* Multi-modal classification\n* Multi-modal transcription\n\n\nDataset Structure\n-----------------" ]
7ea09b00b7c0c6c3e82ca8063e86b12473e33b3e
# Dataset Card for "patent_v3.1_switched" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nguyenthanhdo/patent_v3.1_switched
[ "region:us" ]
2023-11-16T07:24:33+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 121149124.95088126, "num_examples": 100488}], "download_size": 81169121, "dataset_size": 121149124.95088126}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-16T07:24:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "patent_v3.1_switched" More Information needed
[ "# Dataset Card for \"patent_v3.1_switched\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"patent_v3.1_switched\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"patent_v3.1_switched\"\n\nMore Information needed" ]
027c46e917c54e1fe64475d7959c0e46b4707169
# Dataset Card for "medical_meadow_wikidoc_10k" **数据集名称:** *medalpaca/medical_meadow_wikidoc* **数据集原型来源:** *https://huggingface.co/datasets/medalpaca/medical_meadow_wikidoc* **数据规模:** *10K* **数据生成:** *基于维基文档由GPT-3.5-turbo改写* **数据领域:** *QA医学知识* [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/medical_meadow_wikidoc_10k
[ "region:us" ]
2023-11-16T07:35:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 20026284, "num_examples": 9998}], "download_size": 11247022, "dataset_size": 20026284}}
2023-11-17T08:33:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medical_meadow_wikidoc_10k" 数据集名称: *medalpaca/medical_meadow_wikidoc* 数据集原型来源: *URL 数据规模: *10K* 数据生成: *基于维基文档由GPT-3.5-turbo改写* 数据领域: *QA医学知识* More Information needed
[ "# Dataset Card for \"medical_meadow_wikidoc_10k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*10K*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA医学知识*\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medical_meadow_wikidoc_10k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*10K*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA医学知识*\n\n\nMore Information needed" ]
[ 6, 87 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medical_meadow_wikidoc_10k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*10K*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA医学知识*\n\n\nMore Information needed" ]
21e112b82f64cd25d4087139c85b27b2ed836e93
# Dataset Card for "all-nli-NOB" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tollefj/all-nli-NOB
[ "task_categories:sentence-similarity", "language:nb", "language:no", "license:cc-by-4.0", "region:us" ]
2023-11-16T07:38:54+00:00
{"language": ["nb", "no"], "license": "cc-by-4.0", "task_categories": ["sentence-similarity"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 142671221, "num_examples": 942854}], "download_size": 67856445, "dataset_size": 142671221}}
2024-01-06T12:27:25+00:00
[]
[ "nb", "no" ]
TAGS #task_categories-sentence-similarity #language-Norwegian Bokmål #language-Norwegian #license-cc-by-4.0 #region-us
# Dataset Card for "all-nli-NOB" More Information needed
[ "# Dataset Card for \"all-nli-NOB\"\n\nMore Information needed" ]
[ "TAGS\n#task_categories-sentence-similarity #language-Norwegian Bokmål #language-Norwegian #license-cc-by-4.0 #region-us \n", "# Dataset Card for \"all-nli-NOB\"\n\nMore Information needed" ]
[ 42, 17 ]
[ "passage: TAGS\n#task_categories-sentence-similarity #language-Norwegian Bokmål #language-Norwegian #license-cc-by-4.0 #region-us \n# Dataset Card for \"all-nli-NOB\"\n\nMore Information needed" ]
fdb2360eec36dc7ff5b5be10895b91fc143d4cb7
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. 
## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
P051T1V3/health-demo
[ "region:us" ]
2023-11-16T07:53:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data.csv"}]}]}
2023-12-17T19:58:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 8, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
8713d8910542641791b9a563c927a65007286e37
# Dataset Card for Hindi Chat

We know that current English-first LLMs don’t work well for many other languages, in terms of performance, latency, and speed. Building instruction datasets for non-English languages is an important challenge that needs to be solved. Dedicated to addressing this problem, I release 2 new datasets [rishiraj/bengalichat](https://huggingface.co/datasets/rishiraj/bengalichat/) & [rishiraj/hindichat](https://huggingface.co/datasets/rishiraj/hindichat/) of 10,000 instructions and demonstrations each. This data can be used for supervised fine-tuning (SFT) to make multilingual language models follow instructions better.

### Dataset Summary

[rishiraj/hindichat](https://huggingface.co/datasets/rishiraj/hindichat/) was modelled after the instruction dataset described in OpenAI's [InstructGPT paper](https://huggingface.co/papers/2203.02155), and is translated from [HuggingFaceH4/no_robots](https://huggingface.co/datasets/HuggingFaceH4/no_robots/), which comprises mostly single-turn instructions across the following categories:

| Category | Count |
|:-----------|--------:|
| Generation | 4560 |
| Open QA | 1240 |
| Brainstorm | 1120 |
| Chat | 850 |
| Rewrite | 660 |
| Summarize | 420 |
| Coding | 350 |
| Classify | 350 |
| Closed QA | 260 |
| Extract | 190 |

### Languages

The data in [rishiraj/hindichat](https://huggingface.co/datasets/rishiraj/hindichat/) are in Hindi (BCP-47 hi).

### Data Fields

The data fields are as follows (a short usage sketch follows this card):

* `prompt`: Describes the task the model should perform.
* `prompt_id`: A unique ID for the prompt.
* `messages`: An array of messages, where each message indicates the role (system, user, assistant) and the content.
* `category`: Which category the example belongs to (e.g. `Chat` or `Coding`).
* `text`: Content of `messages` in a format that is compatible with dataset_text_field of SFTTrainer.

### Data Splits

|               | train_sft | test_sft |
|---------------|------:| ---: |
| hindichat | 9500 | 500 |

### Licensing Information

The dataset is available under the [Creative Commons NonCommercial (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/legalcode).

### Citation Information

```
@misc{hindichat,
  author = {Rishiraj Acharya},
  title = {Hindi Chat},
  year = {2023},
  publisher = {Hugging Face},
  journal = {Hugging Face repository},
  howpublished = {\url{https://huggingface.co/datasets/rishiraj/hindichat}}
}
```
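Since `text` is already formatted for `dataset_text_field`, a minimal consumption sketch needs nothing beyond the `datasets` library; note that the repository config exposes plain `train` and `test` splits:

```python
from datasets import load_dataset

ds = load_dataset("rishiraj/hindichat", split="train")

example = ds[0]
print(example["category"])             # e.g. "Chat" or "Coding"
print(example["messages"][0]["role"])  # "system", "user" or "assistant"

# `text` flattens `messages` into one string, so the dataset can be passed
# to SFTTrainer via dataset_text_field="text".
```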
rishiraj/hindichat
[ "task_categories:conversational", "task_categories:text-generation", "language:hi", "license:cc-by-nc-4.0", "arxiv:2203.02155", "region:us" ]
2023-11-16T08:14:13+00:00
{"language": ["hi"], "license": "cc-by-nc-4.0", "task_categories": ["conversational", "text-generation"], "pretty_name": "Hindi Chat", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "category", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 64144365, "num_examples": 9500}, {"name": "test", "num_bytes": 3455962, "num_examples": 500}], "download_size": 27275492, "dataset_size": 67600327}}
2023-11-16T09:14:59+00:00
[ "2203.02155" ]
[ "hi" ]
TAGS #task_categories-conversational #task_categories-text-generation #language-Hindi #license-cc-by-nc-4.0 #arxiv-2203.02155 #region-us
Dataset Card for Hindi Chat
===========================

We know that current English-first LLMs don’t work well for many other languages, in terms of performance, latency, and speed. Building instruction datasets for non-English languages is an important challenge that needs to be solved. Dedicated to addressing this problem, I release 2 new datasets rishiraj/bengalichat & rishiraj/hindichat of 10,000 instructions and demonstrations each. This data can be used for supervised fine-tuning (SFT) to make multilingual language models follow instructions better.

### Dataset Summary

rishiraj/hindichat was modelled after the instruction dataset described in OpenAI's InstructGPT paper, and is translated from HuggingFaceH4/no\_robots, which comprises mostly single-turn instructions across the following categories:

### Languages

The data in rishiraj/hindichat are in Hindi (BCP-47 hi).

### Data Fields

The data fields are as follows:

* 'prompt': Describes the task the model should perform.
* 'prompt\_id': A unique ID for the prompt.
* 'messages': An array of messages, where each message indicates the role (system, user, assistant) and the content.
* 'category': Which category the example belongs to (e.g. 'Chat' or 'Coding').
* 'text': Content of 'messages' in a format that is compatible with dataset\_text\_field of SFTTrainer.

### Data Splits

### Licensing Information

The dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0).
[ "### Dataset Summary\n\n\nrishiraj/hindichat was modelled after the instruction dataset described in OpenAI's InstructGPT paper, and is translated from HuggingFaceH4/no\\_robots which comprised mostly of single-turn instructions across the following categories:", "### Languages\n\n\nThe data in rishiraj/hindichat are in Hindi (BCP-47 hi).", "### Data Fields\n\n\nThe data fields are as follows:\n\n\n* 'prompt': Describes the task the model should perform.\n* 'prompt\\_id': A unique ID for the prompt.\n* 'messages': An array of messages, where each message indicates the role (system, user, assistant) and the content.\n* 'category': Which category the example belongs to (e.g. 'Chat' or 'Coding').\n* 'text': Content of 'messages' in a format that is compatible with dataset\\_text\\_field of SFTTrainer.", "### Data Splits", "### Licensing Information\n\n\nThe dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0)." ]
[ "TAGS\n#task_categories-conversational #task_categories-text-generation #language-Hindi #license-cc-by-nc-4.0 #arxiv-2203.02155 #region-us \n", "### Dataset Summary\n\n\nrishiraj/hindichat was modelled after the instruction dataset described in OpenAI's InstructGPT paper, and is translated from HuggingFaceH4/no\\_robots which comprised mostly of single-turn instructions across the following categories:", "### Languages\n\n\nThe data in rishiraj/hindichat are in Hindi (BCP-47 hi).", "### Data Fields\n\n\nThe data fields are as follows:\n\n\n* 'prompt': Describes the task the model should perform.\n* 'prompt\\_id': A unique ID for the prompt.\n* 'messages': An array of messages, where each message indicates the role (system, user, assistant) and the content.\n* 'category': Which category the example belongs to (e.g. 'Chat' or 'Coding').\n* 'text': Content of 'messages' in a format that is compatible with dataset\\_text\\_field of SFTTrainer.", "### Data Splits", "### Licensing Information\n\n\nThe dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0)." ]
[ 50, 66, 22, 139, 5, 26 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-generation #language-Hindi #license-cc-by-nc-4.0 #arxiv-2203.02155 #region-us \n### Dataset Summary\n\n\nrishiraj/hindichat was modelled after the instruction dataset described in OpenAI's InstructGPT paper, and is translated from HuggingFaceH4/no\\_robots which comprised mostly of single-turn instructions across the following categories:### Languages\n\n\nThe data in rishiraj/hindichat are in Hindi (BCP-47 hi).### Data Fields\n\n\nThe data fields are as follows:\n\n\n* 'prompt': Describes the task the model should perform.\n* 'prompt\\_id': A unique ID for the prompt.\n* 'messages': An array of messages, where each message indicates the role (system, user, assistant) and the content.\n* 'category': Which category the example belongs to (e.g. 'Chat' or 'Coding').\n* 'text': Content of 'messages' in a format that is compatible with dataset\\_text\\_field of SFTTrainer.### Data Splits### Licensing Information\n\n\nThe dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0)." ]
08da09a74a0b0277a27a6ca77a93678855b0c172
# Dataset Card for "LaMini-Instruction-Indonesian-Google-Translated" This dataset is on development: the are miss translation in some question answering case like *`please add whitespaces to this text: iwanttoplayfootball`*. It will be translated to *`harap tambahkan spasi pada teks ini: iwanttoplayfootball`* or translated but the whitespaces exist *`harap tambahkan spasi pada teks ini: saya ingin bermain sepak bola`*
hanifabdlh/LaMini-Instruction-Indonesian-Google-Translated
[ "task_categories:text2text-generation", "size_categories:1M<n<10M", "language:id", "license:mit", "region:us" ]
2023-11-16T08:27:49+00:00
{"language": ["id"], "license": "mit", "size_categories": ["1M<n<10M"], "task_categories": ["text2text-generation"], "pretty_name": "LaMini Instruction Indonesian Google Translated", "dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "instruction_source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1256346047, "num_examples": 2585615}], "download_size": 709717141, "dataset_size": 1256346047}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-16T08:44:02+00:00
[]
[ "id" ]
TAGS #task_categories-text2text-generation #size_categories-1M<n<10M #language-Indonesian #license-mit #region-us
# Dataset Card for "LaMini-Instruction-Indonesian-Google-Translated" This dataset is on development: the are miss translation in some question answering case like *'please add whitespaces to this text: iwanttoplayfootball'*. It will be translated to *'harap tambahkan spasi pada teks ini: iwanttoplayfootball'* or translated but the whitespaces exist *'harap tambahkan spasi pada teks ini: saya ingin bermain sepak bola'*
[ "# Dataset Card for \"LaMini-Instruction-Indonesian-Google-Translated\"\n\nThis dataset is on development: the are miss translation in some question answering case like *'please add whitespaces to this text: iwanttoplayfootball'*. It will be translated to *'harap tambahkan spasi pada teks ini: iwanttoplayfootball'* or translated but the whitespaces exist *'harap tambahkan spasi pada teks ini: saya ingin bermain sepak bola'*" ]
[ "TAGS\n#task_categories-text2text-generation #size_categories-1M<n<10M #language-Indonesian #license-mit #region-us \n", "# Dataset Card for \"LaMini-Instruction-Indonesian-Google-Translated\"\n\nThis dataset is on development: the are miss translation in some question answering case like *'please add whitespaces to this text: iwanttoplayfootball'*. It will be translated to *'harap tambahkan spasi pada teks ini: iwanttoplayfootball'* or translated but the whitespaces exist *'harap tambahkan spasi pada teks ini: saya ingin bermain sepak bola'*" ]
[ 41, 113 ]
[ "passage: TAGS\n#task_categories-text2text-generation #size_categories-1M<n<10M #language-Indonesian #license-mit #region-us \n# Dataset Card for \"LaMini-Instruction-Indonesian-Google-Translated\"\n\nThis dataset is on development: the are miss translation in some question answering case like *'please add whitespaces to this text: iwanttoplayfootball'*. It will be translated to *'harap tambahkan spasi pada teks ini: iwanttoplayfootball'* or translated but the whitespaces exist *'harap tambahkan spasi pada teks ini: saya ingin bermain sepak bola'*" ]
e5fb4f4032e8d812a3d14d6dd886f530eb42a766
# Dataset Card for "medical_meadow_wikidoc_patient_information_6k" **数据集名称:** *medalpaca/medical_meadow_wikidoc_patient_information* **数据集原型来源:** *https://huggingface.co/datasets/medalpaca/medical_meadow_wikidoc_patient_information* **数据规模:** *6k* **数据生成:** *基于维基文档由GPT-3.5-turbo改写* **数据领域:** *QA(生活教科书”和“患者信息)* [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/medical_meadow_wikidoc_patient_information_6k
[ "region:us" ]
2023-11-16T08:28:23+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6158724, "num_examples": 5850}], "download_size": 3123806, "dataset_size": 6158724}}
2023-11-20T07:50:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medical_meadow_wikidoc_patient_information_6k" 数据集名称: *medalpaca/medical_meadow_wikidoc_patient_information* 数据集原型来源: *URL 数据规模: *6k* 数据生成: *基于维基文档由GPT-3.5-turbo改写* 数据领域: *QA(生活教科书”和“患者信息)* More Information needed
[ "# Dataset Card for \"medical_meadow_wikidoc_patient_information_6k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc_patient_information*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*6k*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA(生活教科书”和“患者信息)*\n\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medical_meadow_wikidoc_patient_information_6k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc_patient_information*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*6k*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA(生活教科书”和“患者信息)*\n\n\n\nMore Information needed" ]
[ 6, 104 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medical_meadow_wikidoc_patient_information_6k\"\n\n数据集名称: \n*medalpaca/medical_meadow_wikidoc_patient_information*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*6k*\n\n数据生成: \n*基于维基文档由GPT-3.5-turbo改写*\n\n数据领域: \n*QA(生活教科书”和“患者信息)*\n\n\n\nMore Information needed" ]
e8a413720c50cc840fb23f0f88b055b05f5b8daf
# Dataset Card for "classification_dialogue_search_v0.2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Data-Lab/classification_dialogue_search_v0.2
[ "region:us" ]
2023-11-16T08:35:26+00:00
{"dataset_info": {"features": [{"name": "query", "dtype": "string"}, {"name": "ner", "dtype": "string"}, {"name": "gold", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 924613, "num_examples": 5742}], "download_size": 338455, "dataset_size": 924613}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-16T08:35:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "classification_dialogue_search_v0.2" More Information needed
[ "# Dataset Card for \"classification_dialogue_search_v0.2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"classification_dialogue_search_v0.2\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"classification_dialogue_search_v0.2\"\n\nMore Information needed" ]
aca874006332c08e5d8029bf81fee17e8b504a22
# Dataset Card for "medical_meadow_mediqa_2k" **数据集名称:** *medalpaca/medical_meadow_mediqa* **数据集原型来源:** *https://huggingface.co/datasets/medalpaca/medical_meadow_mediqa* **数据规模:** *2k* **数据生成:** *人工生成* **数据领域:** *医患对话* [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xDAN-datasets/medical_meadow_mediqa_2k
[ "region:us" ]
2023-11-16T08:53:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 28533080, "num_examples": 2054}], "download_size": 0, "dataset_size": 28533080}}
2023-11-20T07:48:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medical_meadow_mediqa_2k" 数据集名称: *medalpaca/medical_meadow_mediqa* 数据集原型来源: *URL 数据规模: *2k* 数据生成: *人工生成* 数据领域: *医患对话* More Information needed
[ "# Dataset Card for \"medical_meadow_mediqa_2k\"\n\n数据集名称: \n*medalpaca/medical_meadow_mediqa*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*2k*\n\n数据生成: \n*人工生成*\n\n数据领域: \n*医患对话*\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medical_meadow_mediqa_2k\"\n\n数据集名称: \n*medalpaca/medical_meadow_mediqa*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*2k*\n\n数据生成: \n*人工生成*\n\n数据领域: \n*医患对话*\n\n\nMore Information needed" ]
[ 6, 74 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"medical_meadow_mediqa_2k\"\n\n数据集名称: \n*medalpaca/medical_meadow_mediqa*\n\n数据集原型来源: \n*URL\n\n\n数据规模: \n*2k*\n\n数据生成: \n*人工生成*\n\n数据领域: \n*医患对话*\n\n\nMore Information needed" ]
c617754d571cd1862a259f15b6239086212c637f
# Dataset Card for "law_court_opinion_nonlegal_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zxvix/law_court_opinion_nonlegal_2
[ "region:us" ]
2023-11-16T09:01:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "original_text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 184432.0, "num_examples": 100}], "download_size": 120486, "dataset_size": 184432.0}}
2023-11-16T09:01:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "law_court_opinion_nonlegal_2" More Information needed
[ "# Dataset Card for \"law_court_opinion_nonlegal_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"law_court_opinion_nonlegal_2\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"law_court_opinion_nonlegal_2\"\n\nMore Information needed" ]
5bb6e5c4d8c23c85331ee04005722ecff3bc5c63
# Dataset Card
- This dataset was created solely for the purpose of code testing.
- This dataset was generated by prompting ChatGPT to create sample pieces of news sentences according to a topic.
- Sample prompt: "generate 50 sentences on the topic of "very recent breaking news on wars and conflicts events" with some sample location names. One example: "a missile struck near a residential building in Kiev last night, Russia denied Ukraine's accusations of attacking non-military targets""
- The output sentences were then used to construct the Hugging Face dataset (a sketch of this step follows below).

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
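A minimal sketch of the construction step in the last bullet, assuming the generated sentences were collected into plain Python lists; the variable names and the split ratio are illustrative, not taken from the card:

```python
from datasets import Dataset

# Illustrative only: one generated sentence with its binary topic label.
sentences = [
    "a missile struck near a residential building in Kiev last night, "
    "Russia denied Ukraine's accusations of attacking non-military targets"
]
class_index = [1]

ds = Dataset.from_dict({"text": sentences, "class_index": class_index})
splits = ds.train_test_split(test_size=0.2, seed=42)  # e.g. hold out 20%
```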
joshuapsa/gpt-generated-news-sentences
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "license:mit", "region:us" ]
2023-11-16T09:11:30+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "dataset_info": {"features": [{"name": "class_index", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "text", "dtype": "string"}, {"name": "_air", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_cybersecurity", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_domestic_unrest_violence", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_extreme_weather", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_forced_labor", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_general_biz_trend", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_later_report", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_lawsuit_legal_insurance", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_leisure_other_news", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_maritime", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_pandemics_large_scale_diseases", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_railway", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_strike", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_trade_war_embargos_bans", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_war_conflict", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "_warehouse_fire", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 266620, "num_examples": 640}, {"name": "valid", "num_bytes": 33348, "num_examples": 80}, {"name": "test", "num_bytes": 33277, "num_examples": 80}], "download_size": 100323, "dataset_size": 333245}}
2023-11-16T09:43:33+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-English #license-mit #region-us
# Dataset Card
- This dataset was created solely for the purpose of code testing.
- This dataset was generated by prompting ChatGPT to create sample pieces of news sentences according to a topic.
- Sample prompt: "generate 50 sentences on the topic of "very recent breaking news on wars and conflicts events" with some sample location names. One example: "a missile struck near a residential building in Kiev last night, Russia denied Ukraine's accusations of attacking non-military targets""
- The output sentences were then used to construct the Hugging Face dataset.
 
More Information needed
[ "# Dataset Card\n- This dataset was created solely for the purpose of code testing.\n- This dataset was generated from prompting chatGPT to create sample pieces of news setences according to a topic.\n- Sample prompt: \"generate 50 sentences on the topic of \"very recent breaking news on wars and conflicts events\" with some sample location names. One example: \"a missile struck near a residential building in Kiev last night, Russia denied Ukraine's accusations of attacking non-military targets\"\"\n- The output senteces were then used to construct huggingface dataset.\n \nMore Information needed" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-mit #region-us \n", "# Dataset Card\n- This dataset was created solely for the purpose of code testing.\n- This dataset was generated from prompting chatGPT to create sample pieces of news setences according to a topic.\n- Sample prompt: \"generate 50 sentences on the topic of \"very recent breaking news on wars and conflicts events\" with some sample location names. One example: \"a missile struck near a residential building in Kiev last night, Russia denied Ukraine's accusations of attacking non-military targets\"\"\n- The output senteces were then used to construct huggingface dataset.\n \nMore Information needed" ]
[ 38, 137 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-mit #region-us \n# Dataset Card\n- This dataset was created solely for the purpose of code testing.\n- This dataset was generated from prompting chatGPT to create sample pieces of news setences according to a topic.\n- Sample prompt: \"generate 50 sentences on the topic of \"very recent breaking news on wars and conflicts events\" with some sample location names. One example: \"a missile struck near a residential building in Kiev last night, Russia denied Ukraine's accusations of attacking non-military targets\"\"\n- The output senteces were then used to construct huggingface dataset.\n \nMore Information needed" ]