Dataset columns (lengths are characters for string columns and elements for list columns):

| Column | Type | Min length | Max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | list | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | list | 0 | 25 |
| languages | list | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | list | 0 | 352 |
| processed_texts | list | 1 | 353 |
| tokens_length | list | 1 | 353 |
| input_texts | list | 1 | 40 |
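The rows that follow conform to this schema. As a minimal sketch of how such a dump can be inspected with the `datasets` library (the repository id `user/dataset-cards-dump` is a placeholder, not the actual location of this data):

```python
from datasets import load_dataset

# Placeholder repository id; point this at wherever the dump is hosted.
ds = load_dataset("user/dataset-cards-dump", split="train", streaming=True)

# Each record carries the raw card text plus the derived columns listed above.
row = next(iter(ds))
print(row["id"])            # dataset repository id, e.g. "org/name"
print(row["sha"])           # 40-character commit hash
print(row["created_at"])    # 25-character ISO 8601 timestamp
print(row["tags"])          # list of tag strings
print(row["text"][:200])    # beginning of the dataset card text
```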
be36bcf3a95ea4ba008eca9388059ecb2dbf597e
# Dataset Card for "korean-general-command-voice_6000-12000_samplingRate-16000_for_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jiwon65/aihub_general_6000_for_test
[ "region:us" ]
2023-10-19T14:24:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "audio", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 1055536321, "num_examples": 6000}], "download_size": 898095664, "dataset_size": 1055536321}}
2023-10-19T14:28:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "korean-general-command-voice_6000-12000_samplingRate-16000_for_test" More Information needed
[ "# Dataset Card for \"korean-general-command-voice_6000-12000_samplingRate-16000_for_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"korean-general-command-voice_6000-12000_samplingRate-16000_for_test\"\n\nMore Information needed" ]
[ 6, 36 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"korean-general-command-voice_6000-12000_samplingRate-16000_for_test\"\n\nMore Information needed" ]
6888427348d9e8b6c053a358010bab9164036e46
# Dataset Card for "bk-sdm-small_generated_images_pokemon_blip" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Isamu136/bk-sdm-small_generated_images_pokemon_blip
[ "region:us" ]
2023-10-19T14:25:22+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "seed", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33954051.0, "num_examples": 833}], "download_size": 33930907, "dataset_size": 33954051.0}}
2023-10-19T14:26:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bk-sdm-small_generated_images_pokemon_blip" More Information needed
[ "# Dataset Card for \"bk-sdm-small_generated_images_pokemon_blip\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bk-sdm-small_generated_images_pokemon_blip\"\n\nMore Information needed" ]
[ 6, 31 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bk-sdm-small_generated_images_pokemon_blip\"\n\nMore Information needed" ]
6a0dafebb8c68301aae4f9cb83f48275d1027bc7
# Dataset Card for "guanaco-llama2-1k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Slowblood/guanaco-llama2-1k
[ "region:us" ]
2023-10-19T14:40:56+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1654448, "num_examples": 1000}], "download_size": 966693, "dataset_size": 1654448}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T14:40:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "guanaco-llama2-1k" More Information needed
[ "# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"guanaco-llama2-1k\"\n\nMore Information needed" ]
7204acfe12daefcd37b5df9e46fae74b91f1c39a
# Dataset Card for "JOSIE_v928.16" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Isaak-Carter/JOSIE_v928.16
[ "region:us" ]
2023-10-19T14:43:42+00:00
{"dataset_info": {"features": [{"name": "sample", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6499831, "num_examples": 2348}], "download_size": 3066207, "dataset_size": 6499831}}
2023-10-19T14:43:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "JOSIE_v928.16" More Information needed
[ "# Dataset Card for \"JOSIE_v928.16\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"JOSIE_v928.16\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"JOSIE_v928.16\"\n\nMore Information needed" ]
8b5273837a6ad6ab9b225da695a3718080f86e95
# Dataset Card for Evaluation run of ehartford/Samantha-1.1-70b

## Dataset Description

- **Homepage:**
- **Repository:** https://huggingface.co/ehartford/Samantha-1.1-70b
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [ehartford/Samantha-1.1-70b](https://huggingface.co/ehartford/Samantha-1.1-70b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:

```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_ehartford__Samantha-1.1-70b",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-10-19T15:47:52.190208](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__Samantha-1.1-70b/blob/main/results_2023-10-19T15-47-52.190208.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.5110109060402684,
        "em_stderr": 0.00511922622901773,
        "f1": 0.5558829697986593,
        "f1_stderr": 0.004885906429154466,
        "acc": 0.5744121045517131,
        "acc_stderr": 0.011649119740139782
    },
    "harness|drop|3": {
        "em": 0.5110109060402684,
        "em_stderr": 0.00511922622901773,
        "f1": 0.5558829697986593,
        "f1_stderr": 0.004885906429154466
    },
    "harness|gsm8k|5": {
        "acc": 0.3161485974222896,
        "acc_stderr": 0.012807630673451488
    },
    "harness|winogrande|5": {
        "acc": 0.8326756116811366,
        "acc_stderr": 0.010490608806828079
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
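The loading example in the card covers a single per-task configuration; for the aggregated numbers, the same pattern applies to the "results" configuration and the "latest" split that the card mentions. A minimal sketch:

```python
from datasets import load_dataset

# Aggregated metrics for the most recent evaluation run of this model.
results = load_dataset(
    "open-llm-leaderboard/details_ehartford__Samantha-1.1-70b",
    "results",
    split="latest",
)
print(results[0])
```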
open-llm-leaderboard/details_ehartford__Samantha-1.1-70b
[ "region:us" ]
2023-10-19T14:47:56+00:00
{"pretty_name": "Evaluation run of ehartford/Samantha-1.1-70b", "dataset_summary": "Dataset automatically created during the evaluation run of model [ehartford/Samantha-1.1-70b](https://huggingface.co/ehartford/Samantha-1.1-70b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ehartford__Samantha-1.1-70b\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-19T15:47:52.190208](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__Samantha-1.1-70b/blob/main/results_2023-10-19T15-47-52.190208.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.5110109060402684,\n \"em_stderr\": 0.00511922622901773,\n \"f1\": 0.5558829697986593,\n \"f1_stderr\": 0.004885906429154466,\n \"acc\": 0.5744121045517131,\n \"acc_stderr\": 0.011649119740139782\n },\n \"harness|drop|3\": {\n \"em\": 0.5110109060402684,\n \"em_stderr\": 0.00511922622901773,\n \"f1\": 0.5558829697986593,\n \"f1_stderr\": 0.004885906429154466\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.3161485974222896,\n \"acc_stderr\": 0.012807630673451488\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8326756116811366,\n \"acc_stderr\": 0.010490608806828079\n }\n}\n```", "repo_url": "https://huggingface.co/ehartford/Samantha-1.1-70b", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_19T15_47_52.190208", "path": ["**/details_harness|drop|3_2023-10-19T15-47-52.190208.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-19T15-47-52.190208.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_19T15_47_52.190208", "path": ["**/details_harness|gsm8k|5_2023-10-19T15-47-52.190208.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-19T15-47-52.190208.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_19T15_47_52.190208", "path": ["**/details_harness|winogrande|5_2023-10-19T15-47-52.190208.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-19T15-47-52.190208.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_19T15_47_52.190208", "path": ["results_2023-10-19T15-47-52.190208.parquet"]}, {"split": "latest", "path": ["results_2023-10-19T15-47-52.190208.parquet"]}]}]}
2023-10-19T14:48:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of ehartford/Samantha-1.1-70b ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model ehartford/Samantha-1.1-70b on the Open LLM Leaderboard. The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-19T15:47:52.190208 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of ehartford/Samantha-1.1-70b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/Samantha-1.1-70b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-19T15:47:52.190208(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of ehartford/Samantha-1.1-70b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/Samantha-1.1-70b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-19T15:47:52.190208(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 167, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of ehartford/Samantha-1.1-70b## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/Samantha-1.1-70b on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-19T15:47:52.190208(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
c4731bf7c0a7e687546a21dbdfd0403310b5a37c
# Dataset Card for "find_first_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T14:56:50+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 267331, "num_examples": 210}, {"name": "validation", "num_bytes": 10399, "num_examples": 10}], "download_size": 135617, "dataset_size": 277730}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:48:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
793dfcd290333e2ea63b4c6d86d459f95b2036de
# Dataset Card for "find_second_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T14:56:56+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 265914, "num_examples": 210}, {"name": "validation", "num_bytes": 9977, "num_examples": 10}], "download_size": 135955, "dataset_size": 275891}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:48:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
64e1a8cf85fe8bc48e7e0176537b3324e00c654b
# Dataset Card for "find_last_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T14:57:02+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 266305, "num_examples": 210}, {"name": "validation", "num_bytes": 10271, "num_examples": 10}], "download_size": 136034, "dataset_size": 276576}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:48:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
247606d8ba295f61fbe8e5c146f12d8a1db82a3f
# Dataset Card for "cover-letter-dataset-text-prompt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kwanyick/cover-letter-dataset-text-prompt
[ "region:us" ]
2023-10-19T14:59:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1231557.1678141137, "num_examples": 813}, {"name": "test", "num_bytes": 528675.8321858865, "num_examples": 349}], "download_size": 594129, "dataset_size": 1760233.0}}
2023-10-19T15:00:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cover-letter-dataset-text-prompt" More Information needed
[ "# Dataset Card for \"cover-letter-dataset-text-prompt\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cover-letter-dataset-text-prompt\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cover-letter-dataset-text-prompt\"\n\nMore Information needed" ]
35edec976b579694d4c0841242cc0ea2eb641e92
# Plane wave raw ultrasound simulated data for deep learning speed of sound inversion

Cite this dataset as:

Feigin M, Freedman D, Anthony BW. Computing Speed-of-Sound from ultrasound: user-agnostic recovery and a new benchmark. IEEE Trans Biomed Eng. 2023; doi:10.1109/TBME.2023.3327147

## Simulation

The full dataset consists of 112640 simulations split into 9216 simulations in the training set, 1024 in the validation set, and 1024 in the test set. The measured signal is simulated using the k-wave MATLAB toolbox. Simulations were performed for nine plane waves at \\(0\\), \\(\pm 8\\), \\(\pm 16\\), \\(\pm 24\\), and \\(\pm 32\\) element offsets, with corresponding wavefront angles of \\(0\\), \\(\pm 6.7\\), \\(\pm 13.7\\), \\(\pm 20.2\\), and \\(\pm 26.3\\) degrees (the time delay is calculated based on a 1540 m/s speed of sound, so the actual angle will differ per sample), set to pass through the center of the domain. See the figures for details (three of the 9 plane waves are shown to reduce clutter). Each simulation was performed with two center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window (pulse width) of 5 oscillations. An additional simulation at 4.4 MHz is available under the validation directory to allow testing for transfer learning.

Each simulation comprised \\(1152 \times 1152\\) random speed-of-sound and \\(\alpha\\) (attenuation) coefficient maps following power law attenuation [\\(\mbox{dB} / \mbox{cm} / \mbox{MHz}^2\\)] in a domain \\(42.35 \times 42.35\\) mm in size.

The domain is constructed by layering a randomly selected set of ellipses and half-planes. For each of the resulting domains (organs), we randomly selected the speed of sound, attenuation coefficient, speckle density, and speckle amplitude. Domains were verified not to slice the probe face; i.e., the resulting maps are verified not to have a discontinuity at the probe face.

The speed of sound range is 1300 m/s to 1800 m/s. The \\(\alpha\\) coefficient range is \\(0.05\\) to \\(0.15\\) dB/cm/MHz\\({}^2\\). Background density is set to 0.9 g/cm\\({}^3\\) (density of fat).

Speckle noise is randomly generated in the density domain so as not to affect the wavefront propagation speed (uniformly distributed point sources with 2-10 points per wavelength and uniformly distributed amplitude at \\(\pm 10\%\\)).

## Probe

To match our physical hardware, we simulated a 128-element array with 64 active transmit elements. The simulation was carried out with two pulse center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window of 5 oscillations.

The central plane wave (zero degrees) is centered at elements 33 to 96. The probe face is placed at \\(y = 60\\) (outside the perfectly matched layer) and centered on the \\(x\\) axis. The numerical receive array is 4 elements per sensor element, with a matching kerf (spacing) value, i.e., 4 on 4 off. The signal for each receiver is summed across the 4 receiver elements to generate the 128 receive channels, and the signal is down-sampled to a 40 MHz sampling rate (ADC rate). For the transmit signal, we use a continuous array, as we found that it better matches real-world signals, so for the centered plane wave, a source is placed on all pixels with \\(y = 60\\) and \\(322 \le x \le 830\\) with a zero time delay on all elements.

## File format

The data is in Matlab v7.3 (HDF5) file format created by the Python hdf5storage package.

Fields in each file:

- alpha_coeff: alpha coefficient (attenuation) map {1024 - samples, 1 - channel, 1152 - X dimension, 1152 - Z dimension}
- c0: speed of sound map {1024 - samples, 1, 1152 - X dimension, 1152 - Z dimension}
- cycles: number of cycles in the acoustic wavelet
- f: frequencies simulated
- offsets: plane wave offsets
- p_f<freq>_o<offset>: simulated ultrasound signal {1024 - samples, 1, 128 - receiver, 2667 - time sample}

![Simulation setup](images/simulation.png)

The k-wave simulation setup. The US array is placed at line 60 of the numerical grid. Due to kerf, slightly less than half of the array (64 elements) is excited to generate the outgoing plane wave. To better match the actual signal and avoid artifacts, a continuous section is excited. The angle is set based on an assumed 1540 m/s speed of sound so that the plane wave overlaps the center of the domain.

![Ultrasound array setup](images/array.png)

Array structure, with 4 active elements and 4 kerf elements interleaved. The recorded signal is the average of the 4 receiving cells for each element.

The data simulates plane wave ultrasound data in random media.
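Since the files are Matlab v7.3 containers, any HDF5 reader can open them; the sketch below uses `h5py` rather than the original `hdf5storage` package. The file name is a placeholder, and HDF5 readers may expose MATLAB arrays with the axis order reversed relative to the shapes listed above.

```python
import h5py

# Placeholder file name; any of the simulation files in the dataset should work.
with h5py.File("simulations_part0.mat", "r") as mat:
    # Expected keys include alpha_coeff, c0, cycles, f, offsets, p_f<freq>_o<offset>.
    print(list(mat.keys()))

    c0 = mat["c0"][...]              # speed-of-sound maps
    alpha = mat["alpha_coeff"][...]  # attenuation coefficient maps

    # Documented layout is {1024 samples, 1 channel, 1152 x 1152}; readers may
    # report the axes in reverse order because MATLAB stores arrays column-major.
    print(c0.shape, alpha.shape)

    # Received RF data lives under keys of the form p_f<freq>_o<offset>; the exact
    # names depend on the simulated frequency and plane-wave offset, e.g.:
    # rf = mat["p_f5_o0"][...]
```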
laughingrice/Ultrasound_planewave_sos_inversion
[ "license:mit", "medical imaging", "ultrasound", "doi:10.57967/hf/1240", "region:us" ]
2023-10-19T15:03:06+00:00
{"license": "mit", "tags": ["medical imaging", "ultrasound"], "Authors": ["Micha Feigin", "Daniel Freedman", "Brian W. Anthony"]}
2023-10-21T20:43:44+00:00
[]
[]
TAGS #license-mit #medical imaging #ultrasound #doi-10.57967/hf/1240 #region-us
# Plane wave raw ultrasound simualted data for deep learning speed of sound inversion Cite this dataset as: Feigin M, Freedman D, Anthony BW. Computing Speed-of-Sound from ultrasound: user-agnostic recovery and a new benchmark. IEEE Trans Biomed Eng. 2023; doi:10.1109/TBME.2023.3327147 ## Simulation The full dataset consists of 112640 simulations split into 9216 simulations in the training set, 1024 in the validation set, and 1024 in the test set. The measured signal is simulated using the k-wave MATLAB toolbox. Simulations were performed for nine plane waves at \\(0\\), \\(\pm 8\\), \\(\pm 16\\), \\(\pm 24\\), and \\(\pm 32\\) element offsets, with corresponding wavefront angles of \\(0\\), \\(\pm 6.7\\), \\(\pm 13.7\\), \\(\pm 20.2\\), and \\(\pm 26.3\\) (the time delay is calculated based on 1540 m/s speed of sound so the actual angle will differ per sample), set to pass through the center of the domain. See the figures for details (three of the 9 plane waves are shown to reduce clutter). Each simulation was performed with two center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window (pulse width) of 5 oscillations. An additional simulation at 4.4 MHz is available under the validation directory to allow testing for transfer learning. Each simulation comprised of \\(1152 \times 1152\\) random speed-of-sound and \\(\alpha\\) (attenuation) coefficient maps following power law attenuation [\\(\mbox{dB} / \mbox{cm} / \mbox{MHz}^2\\)] in a domain \\(42.35 \times 42.35\\) mm in size The domain is constructed by layering a randomly selected set of ellipses and half-planes. For each of the resulting domains (organs), we randomly selected the speed of sound, attenuation coefficient, speckle density, and speckle amplitude. Domains were verified to not slice the probe face; i.e. the resulting maps are verified not to have a discontinuity at the probe face. The speed of sound range is 1300 m/s to 1800 m/s. The \\(\alpha\\) coefficient range is \\(0.05\\) to \\(0.15\\) dB/cm/MHz\\({}^2\\). Background density is set to 0.9 g/cm\\({}^3\\) (density of fat). Speckle noise is randomly generated in the density domain so as not to affect the wavefront propagation speed (uniformly distributed point sources with 2-10 points per wavelength and uniformly distributed amplitude at \\(\pm 10\%\\)). ## Probe To match our physical hardware, we simulated a 128-element array with 64 active transmit elements. The simulation was carried out with two pulse center frequencies, 2.5 MHz and 5 MHz with a Gaussian window of 5 oscillations. The central plane wave (zero degrees) is centered at elements 33 to 96. The probe face is placed at \\(y = 60\\) (outside the perfectly matched layer) and centered on the \\(x\\) axis. The numerical receive array is 4 elements per sensor element, with a matching kerf (spacing) value, i.e., 4 on 4 off. The signal for each receiver is summed across the 4 receiver elements to generate the 128 receive channels, and the signal is down-sampled to a 40 MHz sampling rate (ADC rate). For the transmit signal, we use a continuous array, as we found that it better matches real-world signals, so for the centered plane wave, a source is placed on all pixels with \\(y = 60\\) and \\(322 \le x \le 830\\) with a zero time delay on all elements. ## File format The data is in Matlab v7.3 (HDF5) file format create by the python hdf5storage package. 
Fields in each file: - alpha_coeff: alpha coeffienct (attenuation) map {1024 - samples, 1channel, 1152 - X dimension, 1152 - Z dimension} - c0: speed of sound map {1024 - samples, 1, 1152 - X dimension, 1152 - Z dimension} - cycles: number of cycles in the acoustic wavelet - f: frequencies simulated - offsets: plane wave offsets - p_f<freq>_o<offset>: simulated ultrasound signal {1024 - samples, 1, 128 - reciever, 2667 - time sample} !Simulation setup the k-wave simulation setup. The US array is placed at line 60 of the numerical grid. Due to kerf, slightly less than half of the array (64 elements) is excited to generate the outgoing plane wave. To better match the actual signal and avoid artifacts, a continuous section is excited. The angle is set based on an assumed 1540 m/s speed of sound so that the plane wave overlaps the center of the domain !Ultrasound array setup Array structure, with 4 active elements and 4 kerf elements interleaved. The recorded signal is the average of the 4 receiving cells for each element The data simulates plane wave ultrasound data in random media
[ "# Plane wave raw ultrasound simualted data for deep learning speed of sound inversion \n\nCite this dataset as:\n\nFeigin M, Freedman D, Anthony BW. Computing Speed-of-Sound from ultrasound: user-agnostic recovery and a new benchmark. IEEE Trans Biomed Eng. 2023; doi:10.1109/TBME.2023.3327147", "## Simulation\n\nThe full dataset consists of 112640 simulations split into 9216 simulations in the training set, 1024 in the validation set, and 1024 in the test set. The measured signal is simulated using the k-wave MATLAB toolbox. Simulations were performed for nine plane waves at \\\\(0\\\\), \\\\(\\pm 8\\\\), \\\\(\\pm 16\\\\), \\\\(\\pm 24\\\\), and \\\\(\\pm 32\\\\) element offsets, with corresponding wavefront angles of \\\\(0\\\\), \\\\(\\pm 6.7\\\\), \\\\(\\pm 13.7\\\\), \\\\(\\pm 20.2\\\\), and \\\\(\\pm 26.3\\\\) (the time delay is calculated based on 1540 m/s speed of sound so the actual angle will differ per sample), set to pass through the center of the domain. See the figures for details (three of the 9 plane waves are shown to reduce clutter). Each simulation was performed with two center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window (pulse width) of 5 oscillations. An additional simulation at 4.4 MHz is available under the validation directory to allow testing for transfer learning.\n\nEach simulation comprised of \\\\(1152 \\times 1152\\\\) random speed-of-sound and \\\\(\\alpha\\\\) (attenuation) coefficient maps following power law attenuation [\\\\(\\mbox{dB} / \\mbox{cm} / \\mbox{MHz}^2\\\\)] in a domain \\\\(42.35 \\times 42.35\\\\) mm in size\n\nThe domain is constructed by layering a randomly selected set of ellipses and half-planes. For each of the resulting domains (organs), we randomly selected the speed of sound, attenuation coefficient, speckle density, and speckle amplitude. Domains were verified to not slice the probe face; i.e. the resulting maps are verified not to have a discontinuity at the probe face.\n\nThe speed of sound range is 1300 m/s to 1800 m/s. The \\\\(\\alpha\\\\) coefficient range is \\\\(0.05\\\\) to \\\\(0.15\\\\) dB/cm/MHz\\\\({}^2\\\\). Background density is set to 0.9 g/cm\\\\({}^3\\\\) (density of fat).\n\nSpeckle noise is randomly generated in the density domain so as not to affect the wavefront propagation speed (uniformly distributed point sources with 2-10 points per wavelength and uniformly distributed amplitude at \\\\(\\pm 10\\%\\\\)).", "## Probe\n\nTo match our physical hardware, we simulated a 128-element array with 64 active transmit elements. The simulation was carried out with two pulse center frequencies, 2.5 MHz and 5 MHz with a Gaussian window of 5 oscillations. \n\nThe central plane wave (zero degrees) is centered at elements 33 to 96. The probe face is placed at \\\\(y = 60\\\\) (outside the perfectly matched layer) and centered on the \\\\(x\\\\) axis. The numerical receive array is 4 elements per sensor element, with a matching kerf (spacing) value, i.e., 4 on 4 off. The signal for each receiver is summed across the 4 receiver elements to generate the 128 receive channels, and the signal is down-sampled to a 40 MHz sampling rate (ADC rate). 
For the transmit signal, we use a continuous array, as we found that it better matches real-world signals, so for the centered plane wave, a source is placed on all pixels with \\\\(y = 60\\\\) and \\\\(322 \\le x \\le 830\\\\) with a zero time delay on all elements.", "## File format\n\nThe data is in Matlab v7.3 (HDF5) file format create by the python hdf5storage package.\nFields in each file:\n\n- alpha_coeff: alpha coeffienct (attenuation) map {1024 - samples, 1channel, 1152 - X dimension, 1152 - Z dimension}\n- c0: speed of sound map {1024 - samples, 1, 1152 - X dimension, 1152 - Z dimension}\n- cycles: number of cycles in the acoustic wavelet\n- f: frequencies simulated\n- offsets: plane wave offsets\n- p_f<freq>_o<offset>: simulated ultrasound signal {1024 - samples, 1, 128 - reciever, 2667 - time sample}\n\n!Simulation setup\n\nthe k-wave simulation setup. The US array is placed at line 60 of the numerical grid. Due to kerf, slightly less than half of the array (64 elements) is excited to generate the outgoing plane wave. To better match the actual signal and avoid artifacts, a continuous section is excited. The angle is set based on an assumed 1540 m/s speed of sound so that the plane wave overlaps the center of the domain\n\n!Ultrasound array setup\n\nArray structure, with 4 active elements and 4 kerf elements interleaved. The recorded signal is the average of the 4 receiving cells for each element\n\nThe data simulates plane wave ultrasound data in random media" ]
[ "TAGS\n#license-mit #medical imaging #ultrasound #doi-10.57967/hf/1240 #region-us \n", "# Plane wave raw ultrasound simualted data for deep learning speed of sound inversion \n\nCite this dataset as:\n\nFeigin M, Freedman D, Anthony BW. Computing Speed-of-Sound from ultrasound: user-agnostic recovery and a new benchmark. IEEE Trans Biomed Eng. 2023; doi:10.1109/TBME.2023.3327147", "## Simulation\n\nThe full dataset consists of 112640 simulations split into 9216 simulations in the training set, 1024 in the validation set, and 1024 in the test set. The measured signal is simulated using the k-wave MATLAB toolbox. Simulations were performed for nine plane waves at \\\\(0\\\\), \\\\(\\pm 8\\\\), \\\\(\\pm 16\\\\), \\\\(\\pm 24\\\\), and \\\\(\\pm 32\\\\) element offsets, with corresponding wavefront angles of \\\\(0\\\\), \\\\(\\pm 6.7\\\\), \\\\(\\pm 13.7\\\\), \\\\(\\pm 20.2\\\\), and \\\\(\\pm 26.3\\\\) (the time delay is calculated based on 1540 m/s speed of sound so the actual angle will differ per sample), set to pass through the center of the domain. See the figures for details (three of the 9 plane waves are shown to reduce clutter). Each simulation was performed with two center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window (pulse width) of 5 oscillations. An additional simulation at 4.4 MHz is available under the validation directory to allow testing for transfer learning.\n\nEach simulation comprised of \\\\(1152 \\times 1152\\\\) random speed-of-sound and \\\\(\\alpha\\\\) (attenuation) coefficient maps following power law attenuation [\\\\(\\mbox{dB} / \\mbox{cm} / \\mbox{MHz}^2\\\\)] in a domain \\\\(42.35 \\times 42.35\\\\) mm in size\n\nThe domain is constructed by layering a randomly selected set of ellipses and half-planes. For each of the resulting domains (organs), we randomly selected the speed of sound, attenuation coefficient, speckle density, and speckle amplitude. Domains were verified to not slice the probe face; i.e. the resulting maps are verified not to have a discontinuity at the probe face.\n\nThe speed of sound range is 1300 m/s to 1800 m/s. The \\\\(\\alpha\\\\) coefficient range is \\\\(0.05\\\\) to \\\\(0.15\\\\) dB/cm/MHz\\\\({}^2\\\\). Background density is set to 0.9 g/cm\\\\({}^3\\\\) (density of fat).\n\nSpeckle noise is randomly generated in the density domain so as not to affect the wavefront propagation speed (uniformly distributed point sources with 2-10 points per wavelength and uniformly distributed amplitude at \\\\(\\pm 10\\%\\\\)).", "## Probe\n\nTo match our physical hardware, we simulated a 128-element array with 64 active transmit elements. The simulation was carried out with two pulse center frequencies, 2.5 MHz and 5 MHz with a Gaussian window of 5 oscillations. \n\nThe central plane wave (zero degrees) is centered at elements 33 to 96. The probe face is placed at \\\\(y = 60\\\\) (outside the perfectly matched layer) and centered on the \\\\(x\\\\) axis. The numerical receive array is 4 elements per sensor element, with a matching kerf (spacing) value, i.e., 4 on 4 off. The signal for each receiver is summed across the 4 receiver elements to generate the 128 receive channels, and the signal is down-sampled to a 40 MHz sampling rate (ADC rate). 
For the transmit signal, we use a continuous array, as we found that it better matches real-world signals, so for the centered plane wave, a source is placed on all pixels with \\\\(y = 60\\\\) and \\\\(322 \\le x \\le 830\\\\) with a zero time delay on all elements.", "## File format\n\nThe data is in Matlab v7.3 (HDF5) file format create by the python hdf5storage package.\nFields in each file:\n\n- alpha_coeff: alpha coeffienct (attenuation) map {1024 - samples, 1channel, 1152 - X dimension, 1152 - Z dimension}\n- c0: speed of sound map {1024 - samples, 1, 1152 - X dimension, 1152 - Z dimension}\n- cycles: number of cycles in the acoustic wavelet\n- f: frequencies simulated\n- offsets: plane wave offsets\n- p_f<freq>_o<offset>: simulated ultrasound signal {1024 - samples, 1, 128 - reciever, 2667 - time sample}\n\n!Simulation setup\n\nthe k-wave simulation setup. The US array is placed at line 60 of the numerical grid. Due to kerf, slightly less than half of the array (64 elements) is excited to generate the outgoing plane wave. To better match the actual signal and avoid artifacts, a continuous section is excited. The angle is set based on an assumed 1540 m/s speed of sound so that the plane wave overlaps the center of the domain\n\n!Ultrasound array setup\n\nArray structure, with 4 active elements and 4 kerf elements interleaved. The recorded signal is the average of the 4 receiving cells for each element\n\nThe data simulates plane wave ultrasound data in random media" ]
[ 32, 88, 624, 268, 347 ]
[ "passage: TAGS\n#license-mit #medical imaging #ultrasound #doi-10.57967/hf/1240 #region-us \n# Plane wave raw ultrasound simualted data for deep learning speed of sound inversion \n\nCite this dataset as:\n\nFeigin M, Freedman D, Anthony BW. Computing Speed-of-Sound from ultrasound: user-agnostic recovery and a new benchmark. IEEE Trans Biomed Eng. 2023; doi:10.1109/TBME.2023.3327147", "passage: ## Simulation\n\nThe full dataset consists of 112640 simulations split into 9216 simulations in the training set, 1024 in the validation set, and 1024 in the test set. The measured signal is simulated using the k-wave MATLAB toolbox. Simulations were performed for nine plane waves at \\\\(0\\\\), \\\\(\\pm 8\\\\), \\\\(\\pm 16\\\\), \\\\(\\pm 24\\\\), and \\\\(\\pm 32\\\\) element offsets, with corresponding wavefront angles of \\\\(0\\\\), \\\\(\\pm 6.7\\\\), \\\\(\\pm 13.7\\\\), \\\\(\\pm 20.2\\\\), and \\\\(\\pm 26.3\\\\) (the time delay is calculated based on 1540 m/s speed of sound so the actual angle will differ per sample), set to pass through the center of the domain. See the figures for details (three of the 9 plane waves are shown to reduce clutter). Each simulation was performed with two center frequencies, 2.5 MHz and 5 MHz, with a Gaussian window (pulse width) of 5 oscillations. An additional simulation at 4.4 MHz is available under the validation directory to allow testing for transfer learning.\n\nEach simulation comprised of \\\\(1152 \\times 1152\\\\) random speed-of-sound and \\\\(\\alpha\\\\) (attenuation) coefficient maps following power law attenuation [\\\\(\\mbox{dB} / \\mbox{cm} / \\mbox{MHz}^2\\\\)] in a domain \\\\(42.35 \\times 42.35\\\\) mm in size\n\nThe domain is constructed by layering a randomly selected set of ellipses and half-planes. For each of the resulting domains (organs), we randomly selected the speed of sound, attenuation coefficient, speckle density, and speckle amplitude. Domains were verified to not slice the probe face; i.e. the resulting maps are verified not to have a discontinuity at the probe face.\n\nThe speed of sound range is 1300 m/s to 1800 m/s. The \\\\(\\alpha\\\\) coefficient range is \\\\(0.05\\\\) to \\\\(0.15\\\\) dB/cm/MHz\\\\({}^2\\\\). Background density is set to 0.9 g/cm\\\\({}^3\\\\) (density of fat).\n\nSpeckle noise is randomly generated in the density domain so as not to affect the wavefront propagation speed (uniformly distributed point sources with 2-10 points per wavelength and uniformly distributed amplitude at \\\\(\\pm 10\\%\\\\)).## Probe\n\nTo match our physical hardware, we simulated a 128-element array with 64 active transmit elements. The simulation was carried out with two pulse center frequencies, 2.5 MHz and 5 MHz with a Gaussian window of 5 oscillations. \n\nThe central plane wave (zero degrees) is centered at elements 33 to 96. The probe face is placed at \\\\(y = 60\\\\) (outside the perfectly matched layer) and centered on the \\\\(x\\\\) axis. The numerical receive array is 4 elements per sensor element, with a matching kerf (spacing) value, i.e., 4 on 4 off. The signal for each receiver is summed across the 4 receiver elements to generate the 128 receive channels, and the signal is down-sampled to a 40 MHz sampling rate (ADC rate). For the transmit signal, we use a continuous array, as we found that it better matches real-world signals, so for the centered plane wave, a source is placed on all pixels with \\\\(y = 60\\\\) and \\\\(322 \\le x \\le 830\\\\) with a zero time delay on all elements." ]
1ec947309e09fa14e3a6c82e7d68bedd537a2014
# Dataset Card for "rareid_find_first_sent_train_10_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_first_sent_train_10_eval_10
[ "region:us" ]
2023-10-19T15:19:24+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 37171, "num_examples": 30}, {"name": "validation", "num_bytes": 9806, "num_examples": 10}], "download_size": 41288, "dataset_size": 46977}}
2023-10-19T16:02:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_first_sent_train_10_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_first_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_first_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_first_sent_train_10_eval_10\"\n\nMore Information needed" ]
d0a3897fcafc54f974674f0318f8c01bff1c3f01
# Dataset Card for "rareid_find_second_sent_train_10_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_second_sent_train_10_eval_10
[ "region:us" ]
2023-10-19T15:20:53+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39358, "num_examples": 30}, {"name": "validation", "num_bytes": 10863, "num_examples": 10}], "download_size": 49044, "dataset_size": 50221}}
2023-10-19T16:02:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_second_sent_train_10_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_second_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_second_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_second_sent_train_10_eval_10\"\n\nMore Information needed" ]
ffc6fe107812b6821e1577d8ae91ef1ca5ed0fb0
# Dataset Card for "rareid_find_last_sent_train_10_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_last_sent_train_10_eval_10
[ "region:us" ]
2023-10-19T15:22:10+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 38215, "num_examples": 30}, {"name": "validation", "num_bytes": 8924, "num_examples": 10}], "download_size": 42371, "dataset_size": 47139}}
2023-10-19T16:02:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_last_sent_train_10_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_last_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_last_sent_train_10_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_last_sent_train_10_eval_10\"\n\nMore Information needed" ]
ceabdd1d4074e74fb92c5f4279731009f6d989c4
# Dataset Card for "AR_CW" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kamyar-zeinalipour/AR_CW
[ "region:us" ]
2023-10-19T15:36:21+00:00
{"dataset_info": {"features": [{"name": "clue", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2063175, "num_examples": 57706}], "download_size": 1126121, "dataset_size": 2063175}}
2023-10-19T15:36:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AR_CW" More Information needed
[ "# Dataset Card for \"AR_CW\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AR_CW\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"AR_CW\"\n\nMore Information needed" ]
5a0d8f9e8d553eb979ee9e74873405d6e7f04774
# Dataset Card for "rareid_find_first_sent_train_30_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_first_sent_train_30_eval_10
[ "region:us" ]
2023-10-19T15:37:29+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 91200, "num_examples": 70}, {"name": "validation", "num_bytes": 10372, "num_examples": 10}], "download_size": 63795, "dataset_size": 101572}}
2023-10-19T15:37:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_first_sent_train_30_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_first_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_first_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_first_sent_train_30_eval_10\"\n\nMore Information needed" ]
b9515937b8e6ee9cd809be8ba2510f0692f2f7ff
# Dataset Card for "rareid_find_second_sent_train_30_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_second_sent_train_30_eval_10
[ "region:us" ]
2023-10-19T15:37:59+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 92021, "num_examples": 70}, {"name": "validation", "num_bytes": 10518, "num_examples": 10}], "download_size": 67710, "dataset_size": 102539}}
2023-10-19T15:38:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_second_sent_train_30_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_second_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_second_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_second_sent_train_30_eval_10\"\n\nMore Information needed" ]
631f0f9312e6f098ba854fff59c1d85d9176133a
# Dataset Card for "rareid_find_last_sent_train_30_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_last_sent_train_30_eval_10
[ "region:us" ]
2023-10-19T15:38:21+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 89861, "num_examples": 70}, {"name": "validation", "num_bytes": 9933, "num_examples": 10}], "download_size": 65206, "dataset_size": 99794}}
2023-10-19T15:38:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_last_sent_train_30_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_last_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_last_sent_train_30_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_last_sent_train_30_eval_10\"\n\nMore Information needed" ]
d4a581d5a4422f69c4fd218ffec1bada50c3e10b
# Dataset Card for "dataset_plotqa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
martinsinnona/plotqa
[ "region:us" ]
2023-10-19T15:44:22+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 3405264.0, "num_examples": 100}], "download_size": 0, "dataset_size": 3405264.0}}
2023-11-23T18:26:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset_plotqa" More Information needed
[ "# Dataset Card for \"dataset_plotqa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset_plotqa\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset_plotqa\"\n\nMore Information needed" ]
e7c619afcfd7ac090e14ad664810fb1a3e7ba1d1
# Dataset Card for "rareid_find_first_sent_train_50_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_first_sent_train_50_eval_10
[ "region:us" ]
2023-10-19T15:48:29+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 138270, "num_examples": 110}, {"name": "validation", "num_bytes": 10194, "num_examples": 10}], "download_size": 86070, "dataset_size": 148464}}
2023-10-19T15:48:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_first_sent_train_50_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_first_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_first_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_first_sent_train_50_eval_10\"\n\nMore Information needed" ]
1b668b5720d356fcc2eda7de98145b9adb3ffa1d
# Dataset Card for "rareid_find_second_sent_train_50_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_second_sent_train_50_eval_10
[ "region:us" ]
2023-10-19T15:48:34+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 138218, "num_examples": 110}, {"name": "validation", "num_bytes": 9424, "num_examples": 10}], "download_size": 86497, "dataset_size": 147642}}
2023-10-19T15:48:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_second_sent_train_50_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_second_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_second_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_second_sent_train_50_eval_10\"\n\nMore Information needed" ]
3c458151a350ec8e92f1dd30791e2012eda4f446
# Dataset Card for "rareid_find_last_sent_train_50_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_last_sent_train_50_eval_10
[ "region:us" ]
2023-10-19T15:48:40+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 138417, "num_examples": 110}, {"name": "validation", "num_bytes": 10650, "num_examples": 10}], "download_size": 84412, "dataset_size": 149067}}
2023-10-19T15:48:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_last_sent_train_50_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_last_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_last_sent_train_50_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_last_sent_train_50_eval_10\"\n\nMore Information needed" ]
2ea2d4a153f6c7255454aa90b88fdbae12c6c079
# Dataset Card for "rareid_find_first_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_first_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T15:49:02+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 259476, "num_examples": 210}, {"name": "validation", "num_bytes": 10158, "num_examples": 10}], "download_size": 133047, "dataset_size": 269634}}
2023-10-19T15:49:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_first_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_first_sent_train_100_eval_10\"\n\nMore Information needed" ]
d19a675f7b01b7f64e6d4f6398d1a31e92a0e0d6
# Dataset Card for "rareid_find_second_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_second_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T15:49:07+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 258132, "num_examples": 210}, {"name": "validation", "num_bytes": 10381, "num_examples": 10}], "download_size": 130910, "dataset_size": 268513}}
2023-10-19T15:49:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_second_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_second_sent_train_100_eval_10\"\n\nMore Information needed" ]
b3ca834272136510521adad4f116c9c50c37524e
# Dataset Card for "rareid_find_last_sent_train_100_eval_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/rareid_find_last_sent_train_100_eval_10
[ "region:us" ]
2023-10-19T15:49:13+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 255915, "num_examples": 210}, {"name": "validation", "num_bytes": 9694, "num_examples": 10}], "download_size": 128978, "dataset_size": 265609}}
2023-10-19T15:49:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rareid_find_last_sent_train_100_eval_10" More Information needed
[ "# Dataset Card for \"rareid_find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rareid_find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rareid_find_last_sent_train_100_eval_10\"\n\nMore Information needed" ]
dc3ed459691f3087c3c6c00583247840d0385f83
# Dataset Card for "emotion_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Iliab/emotion_dataset
[ "region:us" ]
2023-10-19T15:59:34+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string", "id": "field"}, {"name": "answer", "dtype": "string", "id": "field"}, {"name": "relevant", "list": [{"name": "user_id", "dtype": "string", "id": "question"}, {"name": "value", "dtype": "string", "id": "suggestion"}, {"name": "status", "dtype": "string", "id": "question"}]}, {"name": "relevant-suggestion", "dtype": "string", "id": "suggestion"}, {"name": "relevant-suggestion-metadata", "struct": [{"name": "type", "dtype": "string", "id": "suggestion-metadata"}, {"name": "score", "dtype": "float32", "id": "suggestion-metadata"}, {"name": "agent", "dtype": "string", "id": "suggestion-metadata"}]}, {"name": "external_id", "dtype": "string", "id": "external_id"}, {"name": "metadata", "dtype": "string", "id": "metadata"}], "splits": [{"name": "train", "num_bytes": 252, "num_examples": 1}], "download_size": 7180, "dataset_size": 252}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T15:59:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emotion_dataset" More Information needed
[ "# Dataset Card for \"emotion_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emotion_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emotion_dataset\"\n\nMore Information needed" ]
791f05280778c2212642984c6a658c7c669404f6
# Dataset Card for "JOSIE_v928.13.63_llama" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Isaak-Carter/JOSIE_v928.13.63_llama
[ "region:us" ]
2023-10-19T15:59:56+00:00
{"dataset_info": {"features": [{"name": "sample", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 317419, "num_examples": 310}], "download_size": 75633, "dataset_size": 317419}}
2023-10-19T16:00:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "JOSIE_v928.13.63_llama" More Information needed
[ "# Dataset Card for \"JOSIE_v928.13.63_llama\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"JOSIE_v928.13.63_llama\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"JOSIE_v928.13.63_llama\"\n\nMore Information needed" ]
216af1e4d420c40efc4d47781308dc9c37eaa057
# Dataset Card for "find_second_sent_train_10_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_10_eval_10_hint3
[ "region:us" ]
2023-10-19T16:06:24+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39579, "num_examples": 30}, {"name": "validation", "num_bytes": 9273, "num_examples": 10}], "download_size": 44665, "dataset_size": 48852}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:51:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_10_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_second_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
f061f96348e7717c9b2e26795d287c1f82e1b6ab
# Dataset Card for "find_last_sent_train_10_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_10_eval_10_hint3
[ "region:us" ]
2023-10-19T16:06:30+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39585, "num_examples": 30}, {"name": "validation", "num_bytes": 9250, "num_examples": 10}], "download_size": 45630, "dataset_size": 48835}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:51:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_10_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_last_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_10_eval_10_hint3\"\n\nMore Information needed" ]
ca4bbc3fda348f8dacd710afc0022fa72b5366bf
# Dataset Card for "find_second_sent_train_100_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_100_eval_10_hint3
[ "region:us" ]
2023-10-19T16:08:28+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 268793, "num_examples": 210}, {"name": "validation", "num_bytes": 10276, "num_examples": 10}], "download_size": 138189, "dataset_size": 279069}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:56:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_100_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_second_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
ed349f3563d0cb4c7c5a426d3fd699ec38e41971
# Dataset Card for "find_last_sent_train_100_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_100_eval_10_hint3
[ "region:us" ]
2023-10-19T16:08:35+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 269122, "num_examples": 210}, {"name": "validation", "num_bytes": 10545, "num_examples": 10}], "download_size": 138192, "dataset_size": 279667}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:56:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_100_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_last_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_100_eval_10_hint3\"\n\nMore Information needed" ]
0b731c951cd1f268274c2bb91c30874066f58b64
# Dataset Card for "find_second_sent_train_30_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_30_eval_10_hint3
[ "region:us" ]
2023-10-19T16:08:57+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 90015, "num_examples": 70}, {"name": "validation", "num_bytes": 11204, "num_examples": 10}], "download_size": 64388, "dataset_size": 101219}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:53:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_30_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_second_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
55898e887bf15df00d1afa0a3af42dda01489525
# Dataset Card for "find_last_sent_train_30_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_30_eval_10_hint3
[ "region:us" ]
2023-10-19T16:09:03+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 90057, "num_examples": 70}, {"name": "validation", "num_bytes": 11016, "num_examples": 10}], "download_size": 65240, "dataset_size": 101073}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:53:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_30_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_last_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_30_eval_10_hint3\"\n\nMore Information needed" ]
fce1800790fccbd3823d1f2286990b61c0558244
# Dataset Card for "find_second_sent_train_50_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_50_eval_10_hint3
[ "region:us" ]
2023-10-19T16:09:25+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 135101, "num_examples": 110}, {"name": "validation", "num_bytes": 9347, "num_examples": 10}], "download_size": 81466, "dataset_size": 144448}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:54:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_50_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_second_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
e18d89e1dfc85bf4365eefc0b87253170612fe7b
# Dataset Card for "find_last_sent_train_50_eval_10_hint3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_50_eval_10_hint3
[ "region:us" ]
2023-10-19T16:09:32+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 135382, "num_examples": 110}, {"name": "validation", "num_bytes": 9233, "num_examples": 10}], "download_size": 81619, "dataset_size": 144615}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:54:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_50_eval_10_hint3" More Information needed
[ "# Dataset Card for \"find_last_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_50_eval_10_hint3\"\n\nMore Information needed" ]
6a5386cf72d2a148a3d1e7db841bf02cbfa0ed40
# Dialogs Re-enacted Across Languages (DRAL) corpus DRAL is a bilingual speech corpus of parallel utterances, using recorded conversations and fragments re-enacted in a different language. It is intended as a resource for research, especially for training and evaluating speech-to-speech translation models and systems. We dedicate this corpus to the public domain; there is no copyright (CC 0). DRAL is described in a new technical report: [Dialogs Re-enacted Across Languages, Version 2](https://arxiv.org/abs/2211.11584), Nigel G. Ward, Jonathan E. Avila, Emilia Rivas, Divette Marco. Some initial analyses of this data are described in our [Interspeech 2023 paper](https://arxiv.org/abs/2307.04123). The releases include 2893 short matched Spanish-English pairs (> 2 hours) taken from 104 conversations with 70 unique participants. There are also some illustrative, lower-quality, pairs in Bengali-English, Japanese-English, and French-English. All are packaged together with the full original conversations and full re-enactment recording sessions. ## Links - [DRAL home page](https://www.cs.utep.edu/nigel/dral/) - [DRAL GitHub repo](https://github.com/joneavila/DRAL) - [DRAL technical report](https://arxiv.org/abs/2211.11584) - [Interspeech 2023 paper](https://arxiv.org/abs/2307.04123)
jonavila/DRAL
[ "task_categories:translation", "language:en", "language:es", "license:cc0-1.0", "arxiv:2211.11584", "arxiv:2307.04123", "region:us" ]
2023-10-19T16:32:48+00:00
{"language": ["en", "es"], "license": "cc0-1.0", "task_categories": ["translation"]}
2023-10-23T20:01:49+00:00
[ "2211.11584", "2307.04123" ]
[ "en", "es" ]
TAGS #task_categories-translation #language-English #language-Spanish #license-cc0-1.0 #arxiv-2211.11584 #arxiv-2307.04123 #region-us
# Dialogs Re-enacted Across Languages (DRAL) corpus DRAL is a bilingual speech corpus of parallel utterances, using recorded conversations and fragments re-enacted in a different language. It is intended as a resource for research, especially for training and evaluating speech-to-speech translation models and systems. We dedicate this corpus to the public domain; there is no copyright (CC 0). DRAL is described in a new technical report: Dialogs Re-enacted Across Languages, Version 2, Nigel G. Ward, Jonathan E. Avila, Emilia Rivas, Divette Marco. Some initial analyses of this data are described in our Interspeech 2023 paper. The releases include 2893 short matched Spanish-English pairs (> 2 hours) taken from 104 conversations with 70 unique participants. There are also some illustrative, lower-quality, pairs in Bengali-English, Japanese-English, and French-English. All are packaged together with the full original conversations and full re-enactment recording sessions. ## Links - DRAL home page - DRAL GitHub repo - DRAL technical report - Interspeech 2023 paper
[ "# Dialogs Re-enacted Across Languages (DRAL) corpus\n\nDRAL is a bilingual speech corpus of parallel utterances, using recorded conversations and fragments re-enacted in a different language. It is intended as a resource for research, especially for training and evaluating speech-to-speech translation models and systems. We dedicate this corpus to the public domain; there is no copyright (CC 0).\n\nDRAL is described in a new technical report: Dialogs Re-enacted Across Languages, Version 2, Nigel G. Ward, Jonathan E. Avila, Emilia Rivas, Divette Marco.\n\nSome initial analyses of this data are described in our Interspeech 2023 paper.\n\nThe releases include 2893 short matched Spanish-English pairs (> 2 hours) taken from 104 conversations with 70 unique participants. There are also some illustrative, lower-quality, pairs in Bengali-English, Japanese-English, and French-English. All are packaged together with the full original conversations and full re-enactment recording sessions.", "## Links\n\n- DRAL home page\n- DRAL GitHub repo\n- DRAL technical report\n- Interspeech 2023 paper" ]
[ "TAGS\n#task_categories-translation #language-English #language-Spanish #license-cc0-1.0 #arxiv-2211.11584 #arxiv-2307.04123 #region-us \n", "# Dialogs Re-enacted Across Languages (DRAL) corpus\n\nDRAL is a bilingual speech corpus of parallel utterances, using recorded conversations and fragments re-enacted in a different language. It is intended as a resource for research, especially for training and evaluating speech-to-speech translation models and systems. We dedicate this corpus to the public domain; there is no copyright (CC 0).\n\nDRAL is described in a new technical report: Dialogs Re-enacted Across Languages, Version 2, Nigel G. Ward, Jonathan E. Avila, Emilia Rivas, Divette Marco.\n\nSome initial analyses of this data are described in our Interspeech 2023 paper.\n\nThe releases include 2893 short matched Spanish-English pairs (> 2 hours) taken from 104 conversations with 70 unique participants. There are also some illustrative, lower-quality, pairs in Bengali-English, Japanese-English, and French-English. All are packaged together with the full original conversations and full re-enactment recording sessions.", "## Links\n\n- DRAL home page\n- DRAL GitHub repo\n- DRAL technical report\n- Interspeech 2023 paper" ]
[ 49, 231, 25 ]
[ "passage: TAGS\n#task_categories-translation #language-English #language-Spanish #license-cc0-1.0 #arxiv-2211.11584 #arxiv-2307.04123 #region-us \n# Dialogs Re-enacted Across Languages (DRAL) corpus\n\nDRAL is a bilingual speech corpus of parallel utterances, using recorded conversations and fragments re-enacted in a different language. It is intended as a resource for research, especially for training and evaluating speech-to-speech translation models and systems. We dedicate this corpus to the public domain; there is no copyright (CC 0).\n\nDRAL is described in a new technical report: Dialogs Re-enacted Across Languages, Version 2, Nigel G. Ward, Jonathan E. Avila, Emilia Rivas, Divette Marco.\n\nSome initial analyses of this data are described in our Interspeech 2023 paper.\n\nThe releases include 2893 short matched Spanish-English pairs (> 2 hours) taken from 104 conversations with 70 unique participants. There are also some illustrative, lower-quality, pairs in Bengali-English, Japanese-English, and French-English. All are packaged together with the full original conversations and full re-enactment recording sessions.## Links\n\n- DRAL home page\n- DRAL GitHub repo\n- DRAL technical report\n- Interspeech 2023 paper" ]
c12285630857cceee2024798a84e05ee4b6ebab8
# Dataset Card for "summarize_from_feedback_tldr_3_filtered_oai_preprocessing" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_tldr_3_filtered_oai_preprocessing
[ "region:us" ]
2023-10-19T16:37:41+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "reference_response", "dtype": "string"}, {"name": "reference_response_token", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 984401845, "num_examples": 116722}, {"name": "validation", "num_bytes": 54382429, "num_examples": 6447}, {"name": "test", "num_bytes": 55293071, "num_examples": 6553}], "download_size": 350302087, "dataset_size": 1094077345}}
2023-11-08T02:28:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_tldr_3_filtered_oai_preprocessing" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing\"\n\nMore Information needed" ]
2a1f98f0a70ebe3134aa6a2bbaf7adef4896fc1e
# Dataset Card for "symtune_mini" A smaller dataset for [symbol tuning](https://arxiv.org/abs/2305.08298). I reserve no rights to the dataset, but the original datasets were made available under various public licenses. Hence, consider each subset of this dataset to be licensed under the same terms as the original dataset it was drawn from.
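For reference, a minimal loading sketch (the `question`, `answer`, and `source` columns are taken from the repository metadata; everything else here is only illustrative):

```python
from datasets import load_dataset

# Load the single "train" split of the symbol-tuning mini dataset.
ds = load_dataset("euclaise/symtune_mini", split="train")

# Each row carries a question, its answer, and the name of the source dataset,
# which is what you would check to track per-subset licensing.
example = ds[0]
print(example["question"])
print(example["answer"])
print(example["source"])
```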
euclaise/symtune_mini
[ "arxiv:2305.08298", "region:us" ]
2023-10-19T16:39:09+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 37155051, "num_examples": 15365}], "download_size": 22656800, "dataset_size": 37155051}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-20T16:12:12+00:00
[ "2305.08298" ]
[]
TAGS #arxiv-2305.08298 #region-us
# Dataset Card for "symtune_mini" Smaller dataset for symbol tuning. I reserve no rights to the dataset, but the original datasets were made available under various public licenses. Hence, consider each subset of this dataset to be licensed as the original dataset from where it comes was.
[ "# Dataset Card for \"symtune_mini\"\n\nSmaller dataset for symbol tuning.\n\nI reserve no rights to the dataset, but the original datasets were made available under various public licenses. Hence, consider each subset of this dataset to be licensed as the original dataset from where it comes was." ]
[ "TAGS\n#arxiv-2305.08298 #region-us \n", "# Dataset Card for \"symtune_mini\"\n\nSmaller dataset for symbol tuning.\n\nI reserve no rights to the dataset, but the original datasets were made available under various public licenses. Hence, consider each subset of this dataset to be licensed as the original dataset from where it comes was." ]
[ 15, 71 ]
[ "passage: TAGS\n#arxiv-2305.08298 #region-us \n# Dataset Card for \"symtune_mini\"\n\nSmaller dataset for symbol tuning.\n\nI reserve no rights to the dataset, but the original datasets were made available under various public licenses. Hence, consider each subset of this dataset to be licensed as the original dataset from where it comes was." ]
0d6a8b24f6dd8c6fe7ced75ec11941017fd6b2fa
# Dataset Card for Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1](https://huggingface.co/yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can, for instance, do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_yeontaek__Platypus2xOpenOrca-13B-IA3-v2.1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-22T14:08:24.468600](https://huggingface.co/datasets/open-llm-leaderboard/details_yeontaek__Platypus2xOpenOrca-13B-IA3-v2.1/blob/main/results_2023-10-22T14-08-24.468600.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.005138422818791947, "em_stderr": 0.0007322104102794228, "f1": 0.07932466442953026, "f1_stderr": 0.0017316010986472678, "acc": 0.4421008477279133, "acc_stderr": 0.010182910924383982 }, "harness|drop|3": { "em": 0.005138422818791947, "em_stderr": 0.0007322104102794228, "f1": 0.07932466442953026, "f1_stderr": 0.0017316010986472678 }, "harness|gsm8k|5": { "acc": 0.10993176648976498, "acc_stderr": 0.008616195587865416 }, "harness|winogrande|5": { "acc": 0.7742699289660616, "acc_stderr": 0.011749626260902547 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_yeontaek__Platypus2xOpenOrca-13B-IA3-v2.1
[ "region:us" ]
2023-10-19T16:39:57+00:00
{"pretty_name": "Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", "dataset_summary": "Dataset automatically created during the evaluation run of model [yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1](https://huggingface.co/yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_yeontaek__Platypus2xOpenOrca-13B-IA3-v2.1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-22T14:08:24.468600](https://huggingface.co/datasets/open-llm-leaderboard/details_yeontaek__Platypus2xOpenOrca-13B-IA3-v2.1/blob/main/results_2023-10-22T14-08-24.468600.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.005138422818791947,\n \"em_stderr\": 0.0007322104102794228,\n \"f1\": 0.07932466442953026,\n \"f1_stderr\": 0.0017316010986472678,\n \"acc\": 0.4421008477279133,\n \"acc_stderr\": 0.010182910924383982\n },\n \"harness|drop|3\": {\n \"em\": 0.005138422818791947,\n \"em_stderr\": 0.0007322104102794228,\n \"f1\": 0.07932466442953026,\n \"f1_stderr\": 0.0017316010986472678\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.10993176648976498,\n \"acc_stderr\": 0.008616195587865416\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7742699289660616,\n \"acc_stderr\": 0.011749626260902547\n }\n}\n```", "repo_url": "https://huggingface.co/yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_19T17_39_53.818572", "path": ["**/details_harness|drop|3_2023-10-19T17-39-53.818572.parquet"]}, {"split": "2023_10_22T14_08_24.468600", "path": ["**/details_harness|drop|3_2023-10-22T14-08-24.468600.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-22T14-08-24.468600.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_19T17_39_53.818572", "path": ["**/details_harness|gsm8k|5_2023-10-19T17-39-53.818572.parquet"]}, {"split": "2023_10_22T14_08_24.468600", "path": ["**/details_harness|gsm8k|5_2023-10-22T14-08-24.468600.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-22T14-08-24.468600.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_19T17_39_53.818572", "path": ["**/details_harness|winogrande|5_2023-10-19T17-39-53.818572.parquet"]}, {"split": "2023_10_22T14_08_24.468600", "path": ["**/details_harness|winogrande|5_2023-10-22T14-08-24.468600.parquet"]}, {"split": 
"latest", "path": ["**/details_harness|winogrande|5_2023-10-22T14-08-24.468600.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_19T17_39_53.818572", "path": ["results_2023-10-19T17-39-53.818572.parquet"]}, {"split": "2023_10_22T14_08_24.468600", "path": ["results_2023-10-22T14-08-24.468600.parquet"]}, {"split": "latest", "path": ["results_2023-10-22T14-08-24.468600.parquet"]}]}]}
2023-10-22T13:08:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-22T14:08:24.468600(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-22T14:08:24.468600(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-22T14:08:24.468600(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 28, 31, 176, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-22T14:08:24.468600(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
1b3fe7b76bb3ea731a2ca46ce9adb6072c178ddd
# Dataset Card for "AgentLM-v0.1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Photolens/AgentLM-v1
[ "region:us" ]
2023-10-19T16:40:41+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1665661, "num_examples": 821}], "download_size": 303544, "dataset_size": 1665661}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T16:40:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AgentLM-v0.1" More Information needed
[ "# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
5abdf961fe78fc460d6f8a6f35206fd9a162899e
# Dataset Card for "recycled_alpaca_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
umd-zhou-lab/recycled_alpaca_v1
[ "region:us" ]
2023-10-19T16:42:55+00:00
{"dataset_info": {"features": [{"name": "data", "struct": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 96478203, "num_examples": 52002}], "download_size": 52032506, "dataset_size": 96478203}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T16:48:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "recycled_alpaca_v1" More Information needed
[ "# Dataset Card for \"recycled_alpaca_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"recycled_alpaca_v1\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"recycled_alpaca_v1\"\n\nMore Information needed" ]
1b98bd44c60c6127a98f77db6b55f48752da6fb6
# Dataset Card for "recycled_wiz70_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
umd-zhou-lab/recycled_wiz70_v1
[ "region:us" ]
2023-10-19T16:43:07+00:00
{"dataset_info": {"features": [{"name": "data", "struct": [{"name": "input", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 176446017, "num_examples": 70000}], "download_size": 90580506, "dataset_size": 176446017}}
2023-10-19T16:47:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "recycled_wiz70_v1" More Information needed
[ "# Dataset Card for \"recycled_wiz70_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"recycled_wiz70_v1\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"recycled_wiz70_v1\"\n\nMore Information needed" ]
56e51842704b4b24cc8cfdf33d67752e4bab4b7c
# Dataset Card for "AgentLM-v0.1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Photolens/AgentLM-v2
[ "region:us" ]
2023-10-19T16:44:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2293001, "num_examples": 1134}], "download_size": 462541, "dataset_size": 2293001}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T16:44:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AgentLM-v0.1" More Information needed
[ "# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"AgentLM-v0.1\"\n\nMore Information needed" ]
0e71a2aaa3c196023c96b67f2960fca36631ae2b
# Dataset Card for "hendryks_math" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ck46/hendrycks_math
[ "region:us" ]
2023-10-19T16:48:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "problem", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "solution", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5984772, "num_examples": 7500}, {"name": "test", "num_bytes": 3732833, "num_examples": 5000}], "download_size": 4848007, "dataset_size": 9717605}}
2023-10-19T16:48:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hendryks_math" More Information needed
[ "# Dataset Card for \"hendryks_math\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hendryks_math\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hendryks_math\"\n\nMore Information needed" ]
942706b69e95270b9ffa6dece1e8187c61b907d7
# DaruLM dataset for LLM adaptation ## Table of Contents - [Table of Contents](#table-of-contents) - [Description](#description) - [Usage](#usage) ## Description A growing collection of texts from various domains for Russian LLM adaptation, extracted from other Hugging Face datasets and open resources. **Usage of this dataset is possible only for scientific purposes on a non-commercial basis.** **Credits:** Initial datasets were provided by Ilya Gusev. **NOTICE:** Some domain splits are based on vocabulary statistics and may be noisy. **Current domains** (used for the ```domains``` argument in ```load_dataset```): | | | | | |------------|------------|------------|----------------| | accounting | antique | aphorisms | art | | biography | biology | buriy | business | | cinema | computers | design | dramaturgy | | economics | enwiki | essay | fantasy | | gazeta | geography | guidebooks | habr | | history | humor | language | law | | lenta | literature | medicine | military | | music | ods-tass | philosophy | pikabu | | politic | prose | psychology | reference | | religion | science | sociology | taiga-fontanka | | textbook | wiki | UNDEFINED | | ## Usage Prerequisites: ```bash pip install datasets zstandard jsonlines pysimdjson ``` Dataset iteration: ```python import datasets # Load the habr and textbook domains for example in datasets.load_dataset('dichspace/darulm', domains=["habr","textbook"], split="train", streaming=True): print(example.keys()) print(example) break ```
dichspace/darulm
[ "size_categories:100M<n<1B", "language:ru", "language:en", "not-for-all-audiences", "region:us" ]
2023-10-19T16:59:28+00:00
{"language": ["ru", "en"], "size_categories": ["100M<n<1B"], "pretty_name": "DaruLM", "tags": ["not-for-all-audiences"]}
2024-02-08T13:03:28+00:00
[]
[ "ru", "en" ]
TAGS #size_categories-100M<n<1B #language-Russian #language-English #not-for-all-audiences #region-us
DaruLM dataset for LLM adaptation ================================= Table of Contents ----------------- * Table of Contents * Description * Usage Description ----------- A growing collection of texts of various domains for Russian LLM adaptation extracted from other Hugging Face datasets and open resources. Usage of this dataset is possible only for scientific purposes on a non-commercial basis. Credits: Initial datasets were provided by Ilya Gusev NOTICE: Some domain splits are based on vocabulary stats and may be noisy Current domains: (used for argument in ): Usage ----- Prerequisites: Dataset iteration:
[]
[ "TAGS\n#size_categories-100M<n<1B #language-Russian #language-English #not-for-all-audiences #region-us \n" ]
[ 36 ]
[ "passage: TAGS\n#size_categories-100M<n<1B #language-Russian #language-English #not-for-all-audiences #region-us \n" ]
e275d92df6b151b94f0d5cd1d57568a9b495c54c
# UltraFeedback Paired This is a processed version of the [`openbmb/UltraFeedback`](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset. The following steps were applied: - Create pairs `(response_j, response_k)` where j was rated better than k based on `overall_score` - Sample all 6 pairs for each instruction in the original data This dataset is useful for LLM alignment techniques (like DPO). The processing steps are in [this notebook](https://huggingface.co/datasets/pushpdeep/UltraFeedback-paired/blob/main/Ultrafeedback_paired_version.ipynb). The code is based on [this repository](https://huggingface.co/datasets/lvwerra/stack-exchange-paired).
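A minimal sketch of the pairing step described above, assuming each source UltraFeedback record exposes an `instruction` plus a list of `completions` that each carry a `response` text and the `overall_score` used for ranking (these field names follow the card's description and are assumptions, not a verified schema):

```python
from itertools import combinations

from datasets import load_dataset

# Hypothetical sketch: the completion fields below ("response", "overall_score")
# are assumed from the card's description, not verified against the source schema.
source = load_dataset("openbmb/UltraFeedback", split="train")

pairs = []
for record in source:
    # Every unordered pair of completions for one instruction:
    # 4 completions -> C(4, 2) = 6 pairs, matching "all 6 pairs" above.
    for a, b in combinations(record["completions"], 2):
        if a["overall_score"] == b["overall_score"]:
            continue  # ties carry no preference signal, so skip them here
        better, worse = (a, b) if a["overall_score"] > b["overall_score"] else (b, a)
        pairs.append(
            {
                "question": record["instruction"],
                "response_j": better["response"],  # preferred response
                "response_k": worse["response"],   # rejected response
            }
        )
```

Whether ties are kept or dropped is a guess here; the linked notebook is the authoritative reference for the exact filtering.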
pushpdeep/UltraFeedback-paired
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "license:mit", "region:us" ]
2023-10-19T16:59:30+00:00
{"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "response_j", "dtype": "string"}, {"name": "response_k", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 946257493, "num_examples": 318777}], "download_size": 228559429, "dataset_size": 946257493}}
2023-10-19T17:23:05+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #region-us
# UltraFeedback Paired This is a processed version of the 'openbmb/UltraFeedback'. The following steps were applied: - Create pairs '(response_j, response_k)' where j was rated better than k based on 'overall_score' - Sample all 6 pairs for each instruction in the original data This dataset is useful for LLM alignment techniques(like DPO). The processing steps are in this repository. The code is based on this repository.
[ "# UltraFeedback Paired\n\nThis is a processed version of the 'openbmb/UltraFeedback'. The following steps were applied:\n\n- Create pairs '(response_j, response_k)' where j was rated better than k based on 'overall_score'\n- Sample all 6 pairs for each instruction in the original data\n\nThis dataset is useful for LLM alignment techniques(like DPO). The processing steps are in this repository. The code is based on this repository." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #region-us \n", "# UltraFeedback Paired\n\nThis is a processed version of the 'openbmb/UltraFeedback'. The following steps were applied:\n\n- Create pairs '(response_j, response_k)' where j was rated better than k based on 'overall_score'\n- Sample all 6 pairs for each instruction in the original data\n\nThis dataset is useful for LLM alignment techniques(like DPO). The processing steps are in this repository. The code is based on this repository." ]
[ 38, 119 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #region-us \n# UltraFeedback Paired\n\nThis is a processed version of the 'openbmb/UltraFeedback'. The following steps were applied:\n\n- Create pairs '(response_j, response_k)' where j was rated better than k based on 'overall_score'\n- Sample all 6 pairs for each instruction in the original data\n\nThis dataset is useful for LLM alignment techniques(like DPO). The processing steps are in this repository. The code is based on this repository." ]
d3f705fa8c28a6ec42fbbcefe3fe660b8a67ba3e
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
alokkulkarni/transactions
[ "region:us" ]
2023-10-19T17:08:43+00:00
{}
2023-10-19T18:35:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
5acffe5478b545ddc532940469a39467e773e666
# Dataset Card for "summarize_from_feedback_oai_preprocessing" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_oai_preprocessing
[ "region:us" ]
2023-10-19T17:18:24+00:00
{"dataset_info": {"features": [{"name": "info", "struct": [{"name": "id", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "site", "dtype": "string"}, {"name": "article", "dtype": "string"}]}, {"name": "summaries", "list": [{"name": "text", "dtype": "string"}, {"name": "policy", "dtype": "string"}, {"name": "note", "dtype": "string"}]}, {"name": "choice", "dtype": "int32"}, {"name": "worker", "dtype": "string"}, {"name": "batch", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "extra", "struct": [{"name": "confidence", "dtype": "int32"}]}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "response0", "dtype": "string"}, {"name": "response0_token", "sequence": "int64"}, {"name": "response1", "dtype": "string"}, {"name": "response1_token", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 862825302, "num_examples": 92858}, {"name": "validation", "num_bytes": 812584211, "num_examples": 86086}], "download_size": 127954862, "dataset_size": 1675409513}}
2023-11-08T02:36:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_oai_preprocessing" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_oai_preprocessing\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_oai_preprocessing\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_oai_preprocessing\"\n\nMore Information needed" ]
2934ad53c8250ba4e4130464a6dae3139d1053de
# Dataset Card for MedInstruct


## Dataset Description

- **Homepage:** 
- **Repository:** https://github.com/XZhang97666/AlpaCare
- **Paper:** 
- **Point of Contact:** [Xinlu Zhang]([email protected])

### Dataset Summary

**MedInstruct** encompasses:
1. **MedInstruct-52k**: A dataset comprising 52,000 medical instructions and responses. Instructions are crafted by OpenAI's GPT-4 engine, and the responses are formulated by the GPT-3.5-turbo engine.
2. **MedInstruct-test**: A set of 217 clinician-crafted free-form instruction evaluation tests.
3. **med_seed**: The clinician-crafted seed set used as demonstrations to prompt GPT-4 for task generation.


**MedInstruct-52k** can be used to conduct instruction-tuning of language models, aiming to enhance their instruction-following capability and medical proficiency. \
On the other hand, **MedInstruct-test** can serve as an evaluator of models' instruction-following ability in the medical area.

Specifically, the **MedInstruct-52k** dataset was obtained by:

*Instructions*:
 - Beginning with a medical task seed set that contains 167 clinician-crafted clinical tasks.
 - Utilizing the GPT-4 engine for instruction data generation.
 - Crafting a [prompt](https://github.com/XZhang97666/AlpaCare/blob/master/prompt/instruction_prompt.txt) to serve as the medical instruction generation guidelines for GPT-4.
 - To reduce costs in task generation and introduce greater randomness, 12 instructions are generated at once. This is followed by the selection of 3 random seed instructions from the seed set.

*Responses*:
 - The GPT-3.5-turbo engine is used to synthesize responses, processing each instruction individually.

Here's an illustration of an instruction-response pair:

```json
{
instruction: "Rewrite this instruction on how to use an inhaler to be more patient-friendly and less technical."

input: "Exhale deeply, engage mouthpiece of inhaler, depress canister while simultaneously inhaling long and deep, hold the breath for ten seconds, then gently exhale"

output: "Here's a more patient-friendly version of the instruction on how to use an inhaler:

1. Take a deep breath and completely breathe out.

2. Put the mouthpiece of the inhaler into your mouth and seal your lips around it properly.

3. Press down the canister while breathing in slowly and deeply at the same time.

4. Hold your breath for ten seconds.

5. Finally, breathe out gently."
}
```

## Citation:
If you think it is a useful repo, please cite the paper:

```
@misc{zhang2023alpacareinstructiontuned,
      title={AlpaCare:Instruction-tuned Large Language Models for Medical Application}, 
      author={Xinlu Zhang and Chenxin Tian and Xianjun Yang and Lichang Chen and Zekun Li and Linda Ruth Petzold},
      year={2023},
      eprint={2310.14558},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
xz97/MedInstruct
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "license:cc-by-nc-4.0", "instruction-finetuning", "arxiv:2310.14558", "region:us" ]
2023-10-19T17:45:58+00:00
{"language": ["en"], "license": "cc-by-nc-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "MedInstruct", "tags": ["instruction-finetuning"]}
2023-10-24T00:27:45+00:00
[ "2310.14558" ]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #instruction-finetuning #arxiv-2310.14558 #region-us
# Dataset Card for MedInstruct ## Dataset Description - Homepage: - Repository: URL - Paper: - Point of Contact: Xinlu Zhang ### Dataset Summary MedInstruct encompasses: 1. MedInstruct-52k: A dataset comprising 52,000 medical instructions and responses. Instructions are crafted by OpenAI's GPT-4 engine, and the responses are formulated by the GPT-3.5-turbo engine. 2. MedInstruct-test: A set of 217 clinical craft free-form instruction evaluation tests. 3. med_seed: The clinician-crafted seed set as a denomination to prompt GPT-4 for task generation. MedInstruct-52k can be used to conduct instruction-tuning of language models, aiming to enhance their instruction-following capability and medical proficiency. \ On the other hand, MedInstruct-test can sever as an evaluator of models' instruction-following ability in the medical area. Specifically, the MedInstruct-52k dataset was obtained by: *Instructions*: - Beginning with a medical task seed set that contains 167 clinical-craft clinical tasks. - Utilizing the GPT-4 engine for instruction data generation. - Crafting a prompt to serve as the medical instruction generation guidelines for GPT-4. - To reduce costs in task generation and introduce greater randomness, 12 instructions are generated at once. This is followed by the selection of 3 random seed instructions from the seed set. *Responses*: - The GPT-3.5-turbo engine is used to synthesize responses, processing each instruction individually. Here's an illustration of an instruction-response pair: : If you think it is a useful repo, please cite the paper:
[ "# Dataset Card for MedInstruct", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Point of Contact: Xinlu Zhang", "### Dataset Summary\n\nMedInstruct encompasses:\n1. MedInstruct-52k: A dataset comprising 52,000 medical instructions and responses. Instructions are crafted by OpenAI's GPT-4 engine, and the responses are formulated by the GPT-3.5-turbo engine.\n2. MedInstruct-test: A set of 217 clinical craft free-form instruction evaluation tests.\n3. med_seed: The clinician-crafted seed set as a denomination to prompt GPT-4 for task generation.\n\n\nMedInstruct-52k can be used to conduct instruction-tuning of language models, aiming to enhance their instruction-following capability and medical proficiency. \\\nOn the other hand, MedInstruct-test can sever as an evaluator of models' instruction-following ability in the medical area.\n\nSpecifically, the MedInstruct-52k dataset was obtained by:\n\n*Instructions*:\n - Beginning with a medical task seed set that contains 167 clinical-craft clinical tasks.\n - Utilizing the GPT-4 engine for instruction data generation.\n - Crafting a prompt to serve as the medical instruction generation guidelines for GPT-4.\n - To reduce costs in task generation and introduce greater randomness, 12 instructions are generated at once. This is followed by the selection of 3 random seed instructions from the seed set.\n \n*Responses*:\n - The GPT-3.5-turbo engine is used to synthesize responses, processing each instruction individually.\n \n Here's an illustration of an instruction-response pair:\n\n\n\n\n:\nIf you think it is a useful repo, please cite the paper:" ]
[ "TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #instruction-finetuning #arxiv-2310.14558 #region-us \n", "# Dataset Card for MedInstruct", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Point of Contact: Xinlu Zhang", "### Dataset Summary\n\nMedInstruct encompasses:\n1. MedInstruct-52k: A dataset comprising 52,000 medical instructions and responses. Instructions are crafted by OpenAI's GPT-4 engine, and the responses are formulated by the GPT-3.5-turbo engine.\n2. MedInstruct-test: A set of 217 clinical craft free-form instruction evaluation tests.\n3. med_seed: The clinician-crafted seed set as a denomination to prompt GPT-4 for task generation.\n\n\nMedInstruct-52k can be used to conduct instruction-tuning of language models, aiming to enhance their instruction-following capability and medical proficiency. \\\nOn the other hand, MedInstruct-test can sever as an evaluator of models' instruction-following ability in the medical area.\n\nSpecifically, the MedInstruct-52k dataset was obtained by:\n\n*Instructions*:\n - Beginning with a medical task seed set that contains 167 clinical-craft clinical tasks.\n - Utilizing the GPT-4 engine for instruction data generation.\n - Crafting a prompt to serve as the medical instruction generation guidelines for GPT-4.\n - To reduce costs in task generation and introduce greater randomness, 12 instructions are generated at once. This is followed by the selection of 3 random seed instructions from the seed set.\n \n*Responses*:\n - The GPT-3.5-turbo engine is used to synthesize responses, processing each instruction individually.\n \n Here's an illustration of an instruction-response pair:\n\n\n\n\n:\nIf you think it is a useful repo, please cite the paper:" ]
[ 59, 8, 25, 366 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #instruction-finetuning #arxiv-2310.14558 #region-us \n# Dataset Card for MedInstruct## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Point of Contact: Xinlu Zhang### Dataset Summary\n\nMedInstruct encompasses:\n1. MedInstruct-52k: A dataset comprising 52,000 medical instructions and responses. Instructions are crafted by OpenAI's GPT-4 engine, and the responses are formulated by the GPT-3.5-turbo engine.\n2. MedInstruct-test: A set of 217 clinical craft free-form instruction evaluation tests.\n3. med_seed: The clinician-crafted seed set as a denomination to prompt GPT-4 for task generation.\n\n\nMedInstruct-52k can be used to conduct instruction-tuning of language models, aiming to enhance their instruction-following capability and medical proficiency. \\\nOn the other hand, MedInstruct-test can sever as an evaluator of models' instruction-following ability in the medical area.\n\nSpecifically, the MedInstruct-52k dataset was obtained by:\n\n*Instructions*:\n - Beginning with a medical task seed set that contains 167 clinical-craft clinical tasks.\n - Utilizing the GPT-4 engine for instruction data generation.\n - Crafting a prompt to serve as the medical instruction generation guidelines for GPT-4.\n - To reduce costs in task generation and introduce greater randomness, 12 instructions are generated at once. This is followed by the selection of 3 random seed instructions from the seed set.\n \n*Responses*:\n - The GPT-3.5-turbo engine is used to synthesize responses, processing each instruction individually.\n \n Here's an illustration of an instruction-response pair:\n\n\n\n\n:\nIf you think it is a useful repo, please cite the paper:" ]
5aebe722f85d99acdd6ecb10ef2c790ef6d1f4fb
# Dataset Card for "find_second_sent_train_10_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_10_eval_10_hint10
[ "region:us" ]
2023-10-19T17:52:58+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40008, "num_examples": 30}, {"name": "validation", "num_bytes": 9749, "num_examples": 10}], "download_size": 45762, "dataset_size": 49757}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:50:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_10_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_second_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
fb9221fbd8ff9d957864b3989c26c483cbc2c365
# Dataset Card for "find_last_sent_train_10_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_10_eval_10_hint10
[ "region:us" ]
2023-10-19T17:53:06+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40003, "num_examples": 30}, {"name": "validation", "num_bytes": 9702, "num_examples": 10}], "download_size": 46718, "dataset_size": 49705}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:50:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_10_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_last_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_10_eval_10_hint10\"\n\nMore Information needed" ]
3e6d25f589ef4a17f4f61d970b5e4f2e9ce12445
# Dataset Card for "humaneval-py-mutants" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nuprl-staging/humaneval-py-mutants
[ "region:us" ]
2023-10-19T17:53:18+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "tests", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "stop_tokens", "sequence": "string"}, {"name": "correct", "dtype": "string"}, {"name": "mutants", "sequence": "string"}, {"name": "errors", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 657021, "num_examples": 141}], "download_size": 0, "dataset_size": 657021}}
2023-10-19T17:58:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "humaneval-py-mutants" More Information needed
[ "# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
3f7b2bc08064b0756240753fabf7815df9a2ac46
# Dataset Card for "find_second_sent_train_100_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_100_eval_10_hint10
[ "region:us" ]
2023-10-19T17:53:27+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 273137, "num_examples": 210}, {"name": "validation", "num_bytes": 10682, "num_examples": 10}], "download_size": 142218, "dataset_size": 283819}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:55:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_100_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_second_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
6543c188b7e1d603253e2b9e31e89b4ef62536d5
# Dataset Card for "find_last_sent_train_100_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_100_eval_10_hint10
[ "region:us" ]
2023-10-19T17:53:34+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 273386, "num_examples": 210}, {"name": "validation", "num_bytes": 11007, "num_examples": 10}], "download_size": 142400, "dataset_size": 284393}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:55:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_100_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_last_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_100_eval_10_hint10\"\n\nMore Information needed" ]
ac7b53bc837cc0b3efb4bb9a53c665c2753987c7
# Dataset Card for "find_second_sent_train_30_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_30_eval_10_hint10
[ "region:us" ]
2023-10-19T17:53:57+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 91249, "num_examples": 70}, {"name": "validation", "num_bytes": 11640, "num_examples": 10}], "download_size": 65987, "dataset_size": 102889}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:52:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_30_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_second_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
33c700015a01e5d4b58c79f1ab4ef789c031627e
# Dataset Card for "find_last_sent_train_30_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_30_eval_10_hint10
[ "region:us" ]
2023-10-19T17:54:04+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 91352, "num_examples": 70}, {"name": "validation", "num_bytes": 11480, "num_examples": 10}], "download_size": 67036, "dataset_size": 102832}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:52:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_30_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_last_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_30_eval_10_hint10\"\n\nMore Information needed" ]
66e13d093f2c8df0905bb3352c16737c8b7ff665
# Dataset Card for "find_second_sent_train_50_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_50_eval_10_hint10
[ "region:us" ]
2023-10-19T17:54:27+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 137201, "num_examples": 110}, {"name": "validation", "num_bytes": 9756, "num_examples": 10}], "download_size": 83906, "dataset_size": 146957}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:53:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_50_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_second_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
712484cc9aeb4bf7f78396c29eb27aac5f5627cd
# Dataset Card for "find_last_sent_train_50_eval_10_hint10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_50_eval_10_hint10
[ "region:us" ]
2023-10-19T17:54:32+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 137507, "num_examples": 110}, {"name": "validation", "num_bytes": 9701, "num_examples": 10}], "download_size": 84197, "dataset_size": 147208}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:53:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_50_eval_10_hint10" More Information needed
[ "# Dataset Card for \"find_last_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_50_eval_10_hint10\"\n\nMore Information needed" ]
59b7d6c85d280e4e8612138e2ff815aba11bea71
# Dataset Card for "find_second_sent_train_10_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_10_eval_10_hint5
[ "region:us" ]
2023-10-19T17:56:50+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39713, "num_examples": 30}, {"name": "validation", "num_bytes": 9412, "num_examples": 10}], "download_size": 45009, "dataset_size": 49125}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:51:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_10_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_second_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
ce2836ad1606f162a18800db0740c1160b205a61
# Dataset Card for "find_last_sent_train_10_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_10_eval_10_hint5
[ "region:us" ]
2023-10-19T17:56:55+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39698, "num_examples": 30}, {"name": "validation", "num_bytes": 9384, "num_examples": 10}], "download_size": 45959, "dataset_size": 49082}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:51:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_10_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_last_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_10_eval_10_hint5\"\n\nMore Information needed" ]
763c125b500c2992939135411b2fedd11ab9050f
# Dataset Card for "find_second_sent_train_30_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_30_eval_10_hint5
[ "region:us" ]
2023-10-19T17:58:23+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 90400, "num_examples": 70}, {"name": "validation", "num_bytes": 11329, "num_examples": 10}], "download_size": 64865, "dataset_size": 101729}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:52:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_30_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_second_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
7f712bcb2700e77eb5e9375ff4e1faf34099af1f
# Dataset Card for "find_last_sent_train_30_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_30_eval_10_hint5
[ "region:us" ]
2023-10-19T17:58:30+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 90407, "num_examples": 70}, {"name": "validation", "num_bytes": 11176, "num_examples": 10}], "download_size": 65754, "dataset_size": 101583}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:52:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_30_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_last_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_30_eval_10_hint5\"\n\nMore Information needed" ]
c5a90125c02a052c7ae4bbc306b08bad2118dea0
# Dataset Card for "find_second_sent_train_50_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_50_eval_10_hint5
[ "region:us" ]
2023-10-19T17:59:56+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 135743, "num_examples": 110}, {"name": "validation", "num_bytes": 9461, "num_examples": 10}], "download_size": 82208, "dataset_size": 145204}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:54:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_50_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_second_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
df706212a1ba7b90428edc479417e1cd048ce3fb
# Dataset Card for "find_last_sent_train_50_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_50_eval_10_hint5
[ "region:us" ]
2023-10-19T18:00:02+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 135976, "num_examples": 110}, {"name": "validation", "num_bytes": 9357, "num_examples": 10}], "download_size": 82351, "dataset_size": 145333}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:54:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_50_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_last_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_50_eval_10_hint5\"\n\nMore Information needed" ]
877badc63f575347ce9497aa3eb7eff661bde254
# Dataset Card for "find_second_sent_train_100_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_second_sent_train_100_eval_10_hint5
[ "region:us" ]
2023-10-19T18:01:33+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 270093, "num_examples": 210}, {"name": "validation", "num_bytes": 10392, "num_examples": 10}], "download_size": 139398, "dataset_size": 280485}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:55:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_second_sent_train_100_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_second_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_second_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_second_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
dbb9e35c25657b8f2bad9454f5f1647e845dde9b
# Dataset Card for "find_last_sent_train_100_eval_10_hint5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_last_sent_train_100_eval_10_hint5
[ "region:us" ]
2023-10-19T18:01:39+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 270321, "num_examples": 210}, {"name": "validation", "num_bytes": 10691, "num_examples": 10}], "download_size": 139464, "dataset_size": 281012}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-10-31T14:55:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_last_sent_train_100_eval_10_hint5" More Information needed
[ "# Dataset Card for \"find_last_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_last_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_last_sent_train_100_eval_10_hint5\"\n\nMore Information needed" ]
9272e9cab5e58b3dd4ccd47c2885b26d7affdb29
# Dataset Card for "youtube-data-various-domain" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yongchanskii/youtube-data-various-domain
[ "region:us" ]
2023-10-19T18:07:02+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "channelName", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "videoId", "dtype": "string"}, {"name": "domainTag", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcriptionPath", "dtype": "string"}, {"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "WER", "dtype": "float64"}, {"name": "CER", "dtype": "float64"}, {"name": "referenceText", "dtype": "string"}, {"name": "hypotheseText", "dtype": "string"}, {"name": "referenceTextLength", "dtype": "int64"}, {"name": "hypotheseTextLength", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 559381442.864, "num_examples": 2288}, {"name": "test", "num_bytes": 137840916.0, "num_examples": 572}], "download_size": 691274587, "dataset_size": 697222358.864}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-12-06T15:07:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "youtube-data-various-domain" More Information needed
[ "# Dataset Card for \"youtube-data-various-domain\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"youtube-data-various-domain\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"youtube-data-various-domain\"\n\nMore Information needed" ]
6095c9dc704c44dad1a3d7bd1268431d302ab5e6
# Dataset Card for "renamed_waves" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nbardy/renamed_waves
[ "region:us" ]
2023-10-19T18:21:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 498961211.25, "num_examples": 1306}], "download_size": 497509644, "dataset_size": 498961211.25}}
2023-10-19T18:27:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "renamed_waves" More Information needed
[ "# Dataset Card for \"renamed_waves\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"renamed_waves\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"renamed_waves\"\n\nMore Information needed" ]
84ea67f83cec9692ad254eaa02c9731b24ecfe4c
# Dataset Card for "MedQuAD" This dataset is the converted version of [MedQuAD](https://github.com/abachaa/MedQuAD/tree/master). Some notes about the data: * Multiple values in the `umls_cui`, `umls_semantic_types`, `synonyms` columns are separated by `|` character. * Answers for [`GARD`, `MPlusHerbsSupplements`, `ADAM`, `MPlusDrugs`] sources (31,034 records) are removed from the original dataset to respect the MedlinePlus copyright. * UMLS (`umls`): Unified Medical Language System * CUI (`cui`): Concept Unique Identifier ## Question type discrepancies between the paper and dataset We noticed there are minor discrepancies between the question types mentioned in the paper and the question types in the dataset. Here is a list of these discrepancies and how you can map types in the dataset to those in the paper: | **Dataset question type** | **Paper question type** | | --- | --- | | how can i learn more | learn more | | brand names of combination products | brand names | | other information | information | | outlook | prognosis | | exams and tests | diagnosis (exams and tests) | | stages | ? | | precautions | ? | | interaction**s** with herbs and supplements | interaction with herbs and supplements | | when to contact a medical professional | contact a medical professional | | research | research (or clinical trial) | | interaction**s** with medications | interaction with medications | | interaction**s** with foods | interaction with food | ## Reference If you use MedQuAD, please cite the original paper: ``` @ARTICLE{BenAbacha-BMC-2019, author = {Asma {Ben Abacha} and Dina Demner{-}Fushman}, title = {A Question-Entailment Approach to Question Answering}, journal = {{BMC} Bioinform.}, volume = {20}, number = {1}, pages = {511:1--511:23}, year = {2019}, url = {https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-019-3119-4} } ```
lavita/MedQuAD
[ "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "medical", "region:us" ]
2023-10-19T18:39:05+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["question-answering"], "dataset_info": {"features": [{"name": "document_id", "dtype": "string"}, {"name": "document_source", "dtype": "string"}, {"name": "document_url", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "umls_cui", "dtype": "string"}, {"name": "umls_semantic_types", "dtype": "string"}, {"name": "umls_semantic_group", "dtype": "string"}, {"name": "synonyms", "dtype": "string"}, {"name": "question_id", "dtype": "string"}, {"name": "question_focus", "dtype": "string"}, {"name": "question_type", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34989308, "num_examples": 47441}], "download_size": 10718159, "dataset_size": 34989308}, "tags": ["medical"]}
2023-12-22T22:28:40+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-10K<n<100K #language-English #medical #region-us
Dataset Card for "MedQuAD" ========================== This dataset is the converted version of MedQuAD. Some notes about the data: * Multiple values in the 'umls\_cui', 'umls\_semantic\_types', 'synonyms' columns are separated by '|' character. * Answers for ['GARD', 'MPlusHerbsSupplements', 'ADAM', 'MPlusDrugs'] sources (31,034 records) are removed from the original dataset to respect the MedlinePlus copyright. * UMLS ('umls'): Unified Medical Language System * CUI ('cui'): Concept Unique Identifier Question type discrepancies between the paper and dataset --------------------------------------------------------- We noticed there are minor discrepancies between the question types mentioned in the paper and the question types in the dataset. Here is a list of these discrepancies and how you can map types in the dataset to those in the paper: Reference --------- If you use MedQuAD, please cite the original paper:
[]
[ "TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #medical #region-us \n" ]
[ 37 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-10K<n<100K #language-English #medical #region-us \n" ]
ac260fea53c71eb6078767cc9b283c7fe76a323e
# Dataset Card for "Hardware Phi-1.5B Small Dataset" **✉ Correspondence to:** Weimin Fu ([email protected]) or Xiaolong Guo ([email protected]) ## Citation Information Please cite the following paper when using the OSHD Dataset. ``` @article{fuhardware, title={Hardware Phi-1.5 B: A Large Language Model Encodes Hardware Domain Specific Knowledge}, author={Fu, Weimin and Li, Shijie and Zhao, Yifang and Ma, Haocheng and Dutta, Raj and Zhang, Xuan and Yang, Kaichen and Jin, Yier and Guo, Xiaolong}, journal={29th IEEE/ACM Asia and South Pacific Design Automation Conference (ASP-DAC)}, year={2024} } ``` ### Update from our group for Hardware domain-specific LLM: Blog: [Large Language Model for Hardware Security](https://ece.k-state.edu/research/hardware-security/llm.html) HomePage: [Hardware Security Lab](https://ece.k-state.edu/research/hardware-security/) ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
KSU-HW-SEC/hardware_code_and_sec_small
[ "region:us" ]
2023-10-19T18:45:13+00:00
{"dataset_info": {"features": [{"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10022233711, "num_examples": 510252}], "download_size": 2894629932, "dataset_size": 10022233711}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-01-23T04:57:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Hardware Phi-1.5B Small Dataset" Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL) Please cite the following paper when using the OSHD Dataset. ### Update from our group for Hardware domain-specific LLM: Blog: Large Language Model for Hardware Security HomePage: Hardware Security Lab ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). More Information needed
[ "# Dataset Card for \"Hardware Phi-1.5B Small Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Hardware Phi-1.5B Small Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ 6, 60, 28, 43 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Hardware Phi-1.5B Small Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
81e608d093175117785c217698ffcc8ee4c25cfe
# Dataset Card for "MixSNIPS_inference" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chirunder/MixSNIPS_inference
[ "region:us" ]
2023-10-19T19:09:02+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 5196889, "num_examples": 5001}], "download_size": 1806789, "dataset_size": 5196889}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-19T19:09:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MixSNIPS_inference" More Information needed
[ "# Dataset Card for \"MixSNIPS_inference\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MixSNIPS_inference\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"MixSNIPS_inference\"\n\nMore Information needed" ]
f140f46b09f8ec6cec8ed1a10a2a68f9cd5d5537
# Dataset Card for "lotr-book" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Giulietta/lotr-book
[ "region:us" ]
2023-10-19T19:19:14+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2432593, "num_examples": 1}], "download_size": 0, "dataset_size": 2432593}}
2023-10-20T07:26:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lotr-book" More Information needed
[ "# Dataset Card for \"lotr-book\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lotr-book\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lotr-book\"\n\nMore Information needed" ]
fa3ae23ec59b111bf789df7d61c3317a03695ddc
# Dataset Card for "llama2_7b_fine_tuning_complete_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hemantk089/llama2_7b_fine_tuning_complete_dataset
[ "region:us" ]
2023-10-19T19:28:30+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 261946, "num_examples": 917}], "download_size": 70457, "dataset_size": 261946}}
2023-10-19T19:28:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "llama2_7b_fine_tuning_complete_dataset" More Information needed
[ "# Dataset Card for \"llama2_7b_fine_tuning_complete_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"llama2_7b_fine_tuning_complete_dataset\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"llama2_7b_fine_tuning_complete_dataset\"\n\nMore Information needed" ]
1f0a67f7cabdcf653622e82b7e69be56dbe893c9
# Dataset Card for "hindi_siqa_mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sam2ai/hindi_siqa_mini
[ "region:us" ]
2023-10-19T19:55:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}, {"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answerA", "dtype": "string"}, {"name": "answerB", "dtype": "string"}, {"name": "answerC", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "validation", "num_bytes": 23348, "num_examples": 50}, {"name": "train", "num_bytes": 23348, "num_examples": 50}], "download_size": 32064, "dataset_size": 46696}}
2023-10-19T20:47:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hindi_siqa_mini" More Information needed
[ "# Dataset Card for \"hindi_siqa_mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hindi_siqa_mini\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hindi_siqa_mini\"\n\nMore Information needed" ]
d6d0258a76ec194f8ae2946ab12a9b75dd38aa8e
# Dataset Card for "hindi_story_cloze" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sam2ai/hindi_story_cloze_mini
[ "region:us" ]
2023-10-19T20:05:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "eval", "path": "data/eval-*"}]}], "dataset_info": {"features": [{"name": "story_id", "dtype": "string"}, {"name": "input_sentence_1", "dtype": "string"}, {"name": "input_sentence_2", "dtype": "string"}, {"name": "input_sentence_3", "dtype": "string"}, {"name": "input_sentence_4", "dtype": "string"}, {"name": "sentence_quiz1", "dtype": "string"}, {"name": "sentence_quiz2", "dtype": "string"}, {"name": "answer_right_ending", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 39375, "num_examples": 50}, {"name": "eval", "num_bytes": 39375, "num_examples": 50}], "download_size": 55954, "dataset_size": 78750}}
2023-10-20T19:06:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hindi_story_cloze" More Information needed
[ "# Dataset Card for \"hindi_story_cloze\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hindi_story_cloze\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hindi_story_cloze\"\n\nMore Information needed" ]
5857f98fabdd1cb5d6397def59728c8877314dad
# Dataset Card for "Hardware Phi-1.5B Median Dataset" **✉ Correspondence to:** Weimin Fu ([email protected]) or Xiaolong Guo ([email protected]) ## Citation Information Please cite the following paper when using the OSHD Dataset. ``` @article{fuhardware, title={Hardware Phi-1.5 B: A Large Language Model Encodes Hardware Domain Specific Knowledge}, author={Fu, Weimin and Li, Shijie and Zhao, Yifang and Ma, Haocheng and Dutta, Raj and Zhang, Xuan and Yang, Kaichen and Jin, Yier and Guo, Xiaolong}, journal={29th IEEE/ACM Asia and South Pacific Design Automation Conference (ASP-DAC)}, year={2024} } ``` ### Update from our group for Hardware domain-specific LLM: Blog: [Large Language Model for Hardware Security](https://ece.k-state.edu/research/hardware-security/llm.html) HomePage: [Hardware Security Lab](https://ece.k-state.edu/research/hardware-security/) ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
KSU-HW-SEC/hardware_code_and_sec_median
[ "region:us" ]
2023-10-19T20:08:12+00:00
{"dataset_info": {"features": [{"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32385359072, "num_examples": 742435}], "download_size": 15639570366, "dataset_size": 32385359072}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-01-23T04:56:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Hardware Phi-1.5B Median Dataset" Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL) Please cite the following paper when using the OSHD Dataset. ### Update from our group for Hardware domain-specific LLM: Blog: Large Language Model for Hardware Security HomePage: Hardware Security Lab ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). More Information needed
[ "# Dataset Card for \"Hardware Phi-1.5B Median Dataset\"\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Hardware Phi-1.5B Median Dataset\"\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ 6, 61, 28, 43 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Hardware Phi-1.5B Median Dataset\"\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
e88ecb12be397629a293a9530d9c6473768f837e
# Dataset Card for "hindi_arc_e_mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sam2ai/hindi_arc_e_mini
[ "region:us" ]
2023-10-19T20:31:28+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "answerKey", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "question", "struct": [{"name": "choices", "list": [{"name": "label", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "stem", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 30675, "num_examples": 50}], "download_size": 19352, "dataset_size": 30675}}
2023-10-19T20:31:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hindi_arc_e_mini" More Information needed
[ "# Dataset Card for \"hindi_arc_e_mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hindi_arc_e_mini\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hindi_arc_e_mini\"\n\nMore Information needed" ]
c82c93e178883c9c0f55ccf321a8a5ec61212a8d
# Dataset Card for "helm-charts-synthetic" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
peterbeamish/helm-charts-synthetic
[ "region:us" ]
2023-10-19T20:32:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "chart_name", "dtype": "string"}, {"name": "templates", "sequence": "string"}, {"name": "values", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1441326, "num_examples": 471}, {"name": "test", "num_bytes": 1440681, "num_examples": 472}], "download_size": 709558, "dataset_size": 2882007}}
2023-10-19T20:33:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "helm-charts-synthetic" More Information needed
[ "# Dataset Card for \"helm-charts-synthetic\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"helm-charts-synthetic\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"helm-charts-synthetic\"\n\nMore Information needed" ]
762f7a2a4eaf0af6f22c863f67c67c0e9f303558
# Dataset Card for "humaneval-py-mutants" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nuprl/humaneval-py-mutants
[ "region:us" ]
2023-10-19T20:40:51+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "tests", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "stop_tokens", "sequence": "string"}, {"name": "correct", "dtype": "string"}, {"name": "mutants", "sequence": "string"}, {"name": "errors", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 742882, "num_examples": 161}], "download_size": 245158, "dataset_size": 742882}}
2023-10-19T20:48:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "humaneval-py-mutants" More Information needed
[ "# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"humaneval-py-mutants\"\n\nMore Information needed" ]
80beeaf9944f7f93d7cd03798dfec6545caf13f4
# Dataset Card for "Hardware Phi-1.5B Large Dataset" **✉ Correspondence to:** Weimin Fu ([email protected]) or Xiaolong Guo ([email protected]) ## Citation Information Please cite the following paper when using the OSHD Dataset. ``` @article{fuhardware, title={Hardware Phi-1.5 B: A Large Language Model Encodes Hardware Domain Specific Knowledge}, author={Fu, Weimin and Li, Shijie and Zhao, Yifang and Ma, Haocheng and Dutta, Raj and Zhang, Xuan and Yang, Kaichen and Jin, Yier and Guo, Xiaolong}, journal={29th IEEE/ACM Asia and South Pacific Design Automation Conference (ASP-DAC)}, year={2024} } ``` ### Update from our group for Hardware domain-specific LLM: Blog: [Large Language Model for Hardware Security](https://ece.k-state.edu/research/hardware-security/llm.html) HomePage: [Hardware Security Lab](https://ece.k-state.edu/research/hardware-security/) ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
KSU-HW-SEC/hardware_code_and_sec_large
[ "region:us" ]
2023-10-19T20:58:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 84704737874, "num_examples": 3590612}], "download_size": 12339312125, "dataset_size": 84704737874}}
2024-01-23T04:56:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Hardware Phi-1.5B Large Dataset" Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL) Please cite the following paper when using the OSHD Dataset. ### Update from our group for Hardware domain-specific LLM: Blog: Large Language Model for Hardware Security HomePage: Hardware Security Lab ## Acknowledgment Portions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878). More Information needed
[ "# Dataset Card for \"Hardware Phi-1.5B Large Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Hardware Phi-1.5B Large Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.", "### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab", "## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
[ 6, 60, 28, 43 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Hardware Phi-1.5B Large Dataset\"\n\n Correspondence to: Weimin Fu (weiminf@URL) or Xiaolong Guo (guoxiaolong@URL)\n\nPlease cite the following paper when using the OSHD Dataset.### Update from our group for Hardware domain-specific LLM:\n\nBlog: Large Language Model for Hardware Security\n\nHomePage: Hardware Security Lab## Acknowledgment\nPortions of this work were supported by the National Science Foundation (CCF-2019310, First Award Program of ARISE in EPSCoR 2148878).\n\nMore Information needed" ]
2fad01d3a156a2e8c638a089d40b43789e30b348
# Dataset Card for "encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cmu-mlsp/encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features
[ "region:us" ]
2023-10-19T21:59:57+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 24000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "audio_codes", "sequence": {"sequence": "int64"}}], "splits": [{"name": "validation", "num_bytes": 23693835.0, "num_examples": 73}], "download_size": 22836090, "dataset_size": 23693835.0}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-10-20T04:42:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features" More Information needed
[ "# Dataset Card for \"encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features\"\n\nMore Information needed" ]
[ 6, 45 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"encodec_24khz-opt-125m-pretrained-ft-librispeech_asr_dummy-validation-features\"\n\nMore Information needed" ]
835034979c3a71cf26a552f7514c69270b2b9b72
# Dataset card for "multimodal_satire" This is the dataset for the paper [A Multi-Modal Method for Satire Detection using Textual and Visual Cues](https://aclanthology.org/2020.nlp4if-1.4/). To obtain the full-text body of the articles, you need to scrape websites using the provided links in the dataset. * GitHub repository: [https://github.com/lilyli2004/satire](https://github.com/lilyli2004/satire) ## Reference If you use this dataset, please cite the following paper: ``` @inproceedings{li-etal-2020-multi-modal, title = "A Multi-Modal Method for Satire Detection using Textual and Visual Cues", author = "Li, Lily and Levi, Or and Hosseini, Pedram and Broniatowski, David", booktitle = "Proceedings of the 3rd NLP4IF Workshop on NLP for Internet Freedom: Censorship, Disinformation, and Propaganda", month = dec, year = "2020", address = "Barcelona, Spain (Online)", publisher = "International Committee on Computational Linguistics (ICCL)", url = "https://aclanthology.org/2020.nlp4if-1.4", pages = "33--38", abstract = "Satire is a form of humorous critique, but it is sometimes misinterpreted by readers as legitimate news, which can lead to harmful consequences. We observe that the images used in satirical news articles often contain absurd or ridiculous content and that image manipulation is used to create fictional scenarios. While previous work have studied text-based methods, in this work we propose a multi-modal approach based on state-of-the-art visiolinguistic model ViLBERT. To this end, we create a new dataset consisting of images and headlines of regular and satirical news for the task of satire detection. We fine-tune ViLBERT on the dataset and train a convolutional neural network that uses an image forensics technique. Evaluation on the dataset shows that our proposed multi-modal approach outperforms image-only, text-only, and simple fusion baselines.", } ```
phosseini/multimodal_satire
[ "task_categories:image-classification", "size_categories:1K<n<10K", "language:en", "region:us" ]
2023-10-19T22:05:51+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "headline", "dtype": "string"}, {"name": "image_link", "dtype": "string"}, {"name": "is_satire", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 2841764, "num_examples": 10000}], "download_size": 1268537, "dataset_size": 2841764}}
2023-10-19T22:22:11+00:00
[]
[ "en" ]
TAGS #task_categories-image-classification #size_categories-1K<n<10K #language-English #region-us
# Dataset card for "multimodal_satire" This is the dataset for the paper A Multi-Modal Method for Satire Detection using Textual and Visual Cues. To obtain the full-text body of the articles, you need to scrape websites using the provided links in the dataset. * GitHub repository: URL ## Reference If you use this dataset, please cite the following paper:
[ "# Dataset card for \"multimodal_satire\"\n\nThis is the dataset for the paper A Multi-Modal Method for Satire Detection using Textual and Visual Cues. To obtain the full-text body of the articles, you need to scrape websites using the provided links in the dataset.\n* GitHub repository: URL", "## Reference\nIf you use this dataset, please cite the following paper:" ]
[ "TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #language-English #region-us \n", "# Dataset card for \"multimodal_satire\"\n\nThis is the dataset for the paper A Multi-Modal Method for Satire Detection using Textual and Visual Cues. To obtain the full-text body of the articles, you need to scrape websites using the provided links in the dataset.\n* GitHub repository: URL", "## Reference\nIf you use this dataset, please cite the following paper:" ]
[ 33, 75, 15 ]
[ "passage: TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #language-English #region-us \n# Dataset card for \"multimodal_satire\"\n\nThis is the dataset for the paper A Multi-Modal Method for Satire Detection using Textual and Visual Cues. To obtain the full-text body of the articles, you need to scrape websites using the provided links in the dataset.\n* GitHub repository: URL## Reference\nIf you use this dataset, please cite the following paper:" ]
cc6ed2ae0bcefade91cd02a7f77ae4bab80161a6
# Dataset Card for "textbooks_grounded2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vikp/textbooks_grounded2
[ "region:us" ]
2023-10-19T22:19:55+00:00
{"dataset_info": {"features": [{"name": "topic", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "concepts", "sequence": "null"}, {"name": "outline", "sequence": "string"}, {"name": "markdown", "dtype": "string"}, {"name": "potential_outline", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2130200, "num_examples": 21}], "download_size": 892130, "dataset_size": 2130200}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T22:19:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "textbooks_grounded2" More Information needed
[ "# Dataset Card for \"textbooks_grounded2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"textbooks_grounded2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"textbooks_grounded2\"\n\nMore Information needed" ]
f1747037b5d750e7fa67656087f78c179bcae882
# Dataset Card for "split_dataset_16-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bh8648/split_dataset_16-1
[ "region:us" ]
2023-10-19T22:27:21+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "page_num", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 417525, "num_examples": 105}], "download_size": 211741, "dataset_size": 417525}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T22:27:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "split_dataset_16-1" More Information needed
[ "# Dataset Card for \"split_dataset_16-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"split_dataset_16-1\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"split_dataset_16-1\"\n\nMore Information needed" ]
fafdd9779fb8c115763896fd42213efbb01ea818
# Dataset Card for "split_dataset_16-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bh8648/split_dataset_16-2
[ "region:us" ]
2023-10-19T22:27:24+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "page_num", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 475910, "num_examples": 105}], "download_size": 217195, "dataset_size": 475910}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T22:27:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "split_dataset_16-2" More Information needed
[ "# Dataset Card for \"split_dataset_16-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"split_dataset_16-2\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"split_dataset_16-2\"\n\nMore Information needed" ]
7da2d3883bd6cd5722cf091e23618fc9391ab204
# Dataset Card for "multiapi_prototype_CVECPE_Only" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
venkat-srinivasan-nexusflow/multiapi_prototype_CVECPE_Only
[ "region:us" ]
2023-10-19T22:49:16+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prediction", "dtype": "string"}, {"name": "ground_truth", "dtype": "string"}, {"name": "correctness", "dtype": "int64"}], "splits": [{"name": "split_20231020_172523", "num_bytes": 23946, "num_examples": 78}, {"name": "split_20231019_234916", "num_bytes": 23946, "num_examples": 78}], "download_size": 28725, "dataset_size": 47892}}
2023-11-29T21:19:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "multiapi_prototype_CVECPE_Only" More Information needed
[ "# Dataset Card for \"multiapi_prototype_CVECPE_Only\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"multiapi_prototype_CVECPE_Only\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"multiapi_prototype_CVECPE_Only\"\n\nMore Information needed" ]
8fecfbcff31acd25008f891cf1907d53f5c5da9c
# Dataset Card for "imagenet-1k-same" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
acozma/imagenet-1k-same
[ "region:us" ]
2023-10-19T23:18:36+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 424146912400.0, "num_examples": 500000}], "download_size": 46152816632, "dataset_size": 424146912400.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T19:06:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "imagenet-1k-same" More Information needed
[ "# Dataset Card for \"imagenet-1k-same\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"imagenet-1k-same\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"imagenet-1k-same\"\n\nMore Information needed" ]
e361f3d4058ae263cdad9c3d94678292a4481333
# Dataset Card for "moss-003-sft-chinese-zhtw" ## 資料集摘要 本資料集主要是應用於專案:[MOSS: 開源對話語言模型](https://github.com/OpenLMLab/MOSS) 所收集的數據。 [MOSS]((https://github.com/OpenLMLab/MOSS)) 是支援中英雙語和多種外掛程式的開源對話語言模型,moss-moon 系列模型具有160億參數,在FP16精度下可在單張A100/A800或兩張3090顯示卡運行,在INT4/8精度下可在單張3090顯示卡運行。 MOSS基座語言模型在約七千億中英文以及程式碼單字上預訓練得到,後續經過對話指令微調、插件增強學習和人類偏好訓練具備多輪對話能力及使用多種插件的能力。 ## 原始資料來源 - [moss-003-sft-data](https://github.com/OpenLMLab/MOSS/tree/main/SFT_data): `moss-moon-003-sft` 所使用的多輪對話數據,基於 MOSS-002 內測階段採集的約10萬用戶輸入數據和 gpt-3.5-turbo 構造而成,相比 `moss-002-sft-data`,`moss-003-sft-data` 更符合真實使用者意圖分佈,包含更細緻的有用性類別標記、更廣泛的無害性資料和更長對話輪數,約含110萬條對話數據。完整資料已全部開源。 - [YeungNLP/moss-003-sft-data](https://huggingface.co/datasets/YeungNLP/moss-003-sft-data) ## 資料下載清理 在原始資料集的基礎上,[YeungNLP/moss-003-sft-data](https://huggingface.co/datasets/YeungNLP/moss-003-sft-data) 去除了冗餘訊息,僅提取出有效的對話訊息,並且調整資料格式,以便在訓練中更加靈活地組織資料格式。 1. 下載 [YeungNLP/moss-003-sft-data](https://huggingface.co/datasets/YeungNLP/moss-003-sft-data) 2. 使用 OpenCC 來進行簡繁轉換 3. 使用 Huggingface Datasets 來上傳至 Huggingface Hub ## 資料集結構 本資料集原始為jsonl格式,每一行的樣本為一個多輪對話數據,格式如下: ```json { "conversation_id":1, "category":"Brainstorming", "conversation":[ { "human":"如何保障工作中遵循正確的安全準則?", "assistant":"為了保障工作中遵循正確的安全準則,以下是一些建議:\n\n1.瞭解相關安全規定和標準:瞭解相關的安全規定和標準,並遵守它們。這可以包括公司和政府的安全標準,以及行業標準和最佳實踐。\n\n2.培訓和教育:確保您和您的同事接受了必要的培訓和教育,以瞭解正確的安全準則和行為。\n\n3.使用正確的工具和裝置:確保您使用正確的工具和裝置,並且它們得到了正確的維護和保養。\n\n4.個人防護裝備:確保您和您的同事穿戴正確的個人防護裝備,如安全鞋、透明眼鏡或面罩、手套等。\n\n5.建立一個安全文化:鼓勵個人對安全的責任感和意識,並建立一個安全文化,使人們始終關注他們自己和他人的安全。\n\n6.持續監測和改進:持續監測和改進安全準則和程式,以確保它們保持最新,並適應變化的情況。\n\n這些措施可以幫助您和您的同事在工作中保障正確的安全準則,確保您的工作場所是一個安全的環境。" }, { "human":"如何應對突發緊急情況?", "assistant":"突發緊急情況可能會在任何時間發生,為了應對這些情況,以下是一些建議:\n\n1.保持冷靜:保持冷靜和清醒,以便能夠更好地應對情況。\n\n2.評估情況:評估緊急情況的性質和範圍,以便了解應該採取哪些行動。\n\n3.觸發應急計劃:如果您的機構或公司有應急計劃,立即觸發它,並按照計劃的步驟行動。\n\n4.聯絡相關人員:通知必要的人員,例如緊急服務、管理層或其他相關人員,並與他們保持聯絡。\n\n5.遵循安全準則:在處理緊急情況時,請遵循正確的安全準則,確保自己和其他人的安全。\n\n6.提供及時資訊:儘快向相關人員提供準確的資訊,以便他們能夠做出正確的決策。\n\n7.合作應對:與其他人合作應對緊急情況並相互支援。\n\n總之,應對突發緊急情況需要保持冷靜和清醒,評估情況並採取適當的行動。同時,需要遵循正確的安全準則,並與其他人合作應對緊急情況。" }, { "human":"在網路安全方面,如何避免遭受駭客攻擊?", "assistant":"以下是一些避免遭受駭客攻擊的建議:\n\n1.更新安全軟體:確保您的作業系統、應用程式和安全軟體是最新的,並隨時更新它們。這可以幫助您及時發現和修復安全漏洞。\n\n2.使用強密碼:使用強密碼,包括字母、數字和特殊字元。不要使用容易猜測的密碼,如生日、姓名或常用單詞。\n\n3.使用多因素身份驗證:使用多因素身份驗證來增加賬戶的安全性,例如傳送簡訊驗證碼或使用指紋識別。\n\n4.小心附件和連結:不要開啟未知的附件或連結,因為它們可能包含惡意軟體。\n\n5.備份重要檔案:定期備份重要檔案,並將其儲存在安全的位置。\n\n6.保護個人資訊:不要在網際網路上公開個人資訊,例如出生日期、地址或社交安全號碼。\n\n7.使用加密連線:在訪問敏感資訊時,使用加密連線,如HTTPS。\n\n8.教育員工:向員工提供網路安全培訓,並確保他們瞭解正確的安全措施和行為。\n\n總之,避免遭受駭客攻擊需要採取多種措施,包括更新安全軟體、使用強密碼、使用多因素身份驗證、小心附件和連結、備份重要檔案、保護個人資訊、使用加密連線以及教育員工。這些措施可以幫助您保護個人資訊和資料免受駭客攻擊。" }, { "human":"如何確保網路連線的安全性?", "assistant":"以下是一些確保網路連線安全性的建議:\n\n1.使用加密連線:使用加密連線,如HTTPS,以確保資料傳輸過程中不被竊聽或篡改。\n\n2.使用防病毒軟體:安裝和更新防病毒軟體,可以幫助檢測和清除惡意軟體。\n\n3.使用防火牆:使用防火牆保護網路和裝置,可以幫助阻止未經授權的訪問和攻擊。\n\n4.禁止未經授權的訪問:禁止未經授權的訪問,包括限制網路訪問和裝置訪問許可權。\n\n5.定期更新作業系統和應用程式:定期更新作業系統和應用程式,以修復安全漏洞和缺陷。\n\n6.使用強密碼:使用強密碼,包括字母、數字和特殊字元,並定期更改密碼。\n\n7.控制訪問:控制訪問網路和裝置的人員,僅授權人員可訪問重要資源。\n\n8.教育員工:向員工提供網路安全培訓,並確保他們瞭解正確的安全措施和行為。\n\n總之,確保網路連線的安全性需要採取多種措施,包括使用加密連線、使用防病毒軟體、使用防火牆、禁止未經授權的訪問、定期更新作業系統和應用程式、使用強密碼、控制訪問和教育員工。這些措施可以幫助您保護網路和裝置免受未經授權的訪問和攻擊。" }, { "human":"對於金融機構來說,如何保護客戶的隱私和資料安全?", 
"assistant":"金融機構需要對客戶的隱私和資料安全高度重視,以下是一些保護客戶隱私和資料安全的建議:\n\n1.遵守合規要求:確保遵守所有適用的合規要求和監管標準,包括資料隱私法規和安全標準。\n\n2.加強身份驗證:採用多因素身份驗證,例如使用簡訊驗證碼、指紋識別等,以確保客戶身份的真實性。\n\n3.加密敏感資料:使用加密技術對敏感資料進行加密,以保護客戶資訊不被未經授權的人員獲得。\n\n4.建立安全控制:建立安全控制,包括訪問控制、安全審計和安全事件響應,以保護客戶資訊的安全性。\n\n5.教育員工:向員工提供網路安全培訓,並確保他們瞭解正確的安全措施和行為。\n\n6.定期測試:定期進行安全測試和漏洞掃描,以發現和修復潛在的安全漏洞。\n\n7.建立應急計劃:建立應急計劃,並對其進行定期測試,以確保在緊急情況下能夠及時響應和恢復。\n\n總之,保護客戶隱私和資料安全需要採取多種措施,包括遵守合規要求、加強身份驗證、加密敏感資料、建立安全控制、教育員工、定期測試和建立應急計劃。這些措施可以幫助金融機構保護客戶資訊的安全性和隱私。" } ] } ``` ## 資料欄位 - `conversation_id`: (int64) 資料編號 - `category`: (string) 對話類別 - `conversation`: (list) 多輪對話 - `human`: (string) 使用者角色的詢問 - `assistant`: (string) 小助手的回答 ## 如何使用 ```python from datasets import load_dataset dataset = load_dataset("erhwenkuo/moss-003-sft-chinese-zhtw", split="train") ``` ## 許可資訊 [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/deed.zh-hant) ## 引用 ``` @article{sun2023moss, title={MOSS: Training Conversational Language Models from Synthetic Data}, author={Tianxiang Sun and Xiaotian Zhang and Zhengfu He and Peng Li and Qinyuan Cheng and Hang Yan and Xiangyang Liu and Yunfan Shao and Qiong Tang and Xingjian Zhao and Ke Chen and Yining Zheng and Zhejian Zhou and Ruixiao Li and Jun Zhan and Yunhua Zhou and Linyang Li and Xiaogui Yang and Lingling Wu and Zhangyue Yin and Xuanjing Huang and Xipeng Qiu}, year={2023} } ```
erhwenkuo/moss-003-sft-chinese-zhtw
[ "task_categories:conversational", "size_categories:1M<n<10M", "language:zh", "license:cc", "region:us" ]
2023-10-19T23:19:41+00:00
{"language": ["zh"], "license": "cc", "size_categories": ["1M<n<10M"], "task_categories": ["conversational"], "dataset_info": {"features": [{"name": "conversation_id", "dtype": "int64"}, {"name": "category", "dtype": "string"}, {"name": "conversation", "list": [{"name": "human", "dtype": "string"}, {"name": "assistant", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 8438001353, "num_examples": 1074551}], "download_size": 4047825896, "dataset_size": 8438001353}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-20T23:38:13+00:00
[]
[ "zh" ]
TAGS #task_categories-conversational #size_categories-1M<n<10M #language-Chinese #license-cc #region-us
# Dataset Card for "moss-003-sft-chinese-zhtw" ## 資料集摘要 本資料集主要是應用於專案:MOSS: 開源對話語言模型 所收集的數據。 MOSS) 是支援中英雙語和多種外掛程式的開源對話語言模型,moss-moon 系列模型具有160億參數,在FP16精度下可在單張A100/A800或兩張3090顯示卡運行,在INT4/8精度下可在單張3090顯示卡運行。 MOSS基座語言模型在約七千億中英文以及程式碼單字上預訓練得到,後續經過對話指令微調、插件增強學習和人類偏好訓練具備多輪對話能力及使用多種插件的能力。 ## 原始資料來源 - moss-003-sft-data: 'moss-moon-003-sft' 所使用的多輪對話數據,基於 MOSS-002 內測階段採集的約10萬用戶輸入數據和 gpt-3.5-turbo 構造而成,相比 'moss-002-sft-data','moss-003-sft-data' 更符合真實使用者意圖分佈,包含更細緻的有用性類別標記、更廣泛的無害性資料和更長對話輪數,約含110萬條對話數據。完整資料已全部開源。 - YeungNLP/moss-003-sft-data ## 資料下載清理 在原始資料集的基礎上,YeungNLP/moss-003-sft-data 去除了冗餘訊息,僅提取出有效的對話訊息,並且調整資料格式,以便在訓練中更加靈活地組織資料格式。 1. 下載 YeungNLP/moss-003-sft-data 2. 使用 OpenCC 來進行簡繁轉換 3. 使用 Huggingface Datasets 來上傳至 Huggingface Hub ## 資料集結構 本資料集原始為jsonl格式,每一行的樣本為一個多輪對話數據,格式如下: ## 資料欄位 - 'conversation_id': (int64) 資料編號 - 'category': (string) 對話類別 - 'conversation': (list) 多輪對話 - 'human': (string) 使用者角色的詢問 - 'assistant': (string) 小助手的回答 ## 如何使用 ## 許可資訊 CC BY-NC 4.0 ## 引用
[ "# Dataset Card for \"moss-003-sft-chinese-zhtw\"", "## 資料集摘要\n\n本資料集主要是應用於專案:MOSS: 開源對話語言模型 所收集的數據。\n\nMOSS) 是支援中英雙語和多種外掛程式的開源對話語言模型,moss-moon 系列模型具有160億參數,在FP16精度下可在單張A100/A800或兩張3090顯示卡運行,在INT4/8精度下可在單張3090顯示卡運行。 MOSS基座語言模型在約七千億中英文以及程式碼單字上預訓練得到,後續經過對話指令微調、插件增強學習和人類偏好訓練具備多輪對話能力及使用多種插件的能力。", "## 原始資料來源\n\n- moss-003-sft-data: 'moss-moon-003-sft' 所使用的多輪對話數據,基於 MOSS-002 內測階段採集的約10萬用戶輸入數據和 gpt-3.5-turbo 構造而成,相比 'moss-002-sft-data','moss-003-sft-data' 更符合真實使用者意圖分佈,包含更細緻的有用性類別標記、更廣泛的無害性資料和更長對話輪數,約含110萬條對話數據。完整資料已全部開源。\n- YeungNLP/moss-003-sft-data", "## 資料下載清理\n\n在原始資料集的基礎上,YeungNLP/moss-003-sft-data 去除了冗餘訊息,僅提取出有效的對話訊息,並且調整資料格式,以便在訓練中更加靈活地組織資料格式。\n\n1. 下載 YeungNLP/moss-003-sft-data\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Huggingface Datasets 來上傳至 Huggingface Hub", "## 資料集結構\n\n本資料集原始為jsonl格式,每一行的樣本為一個多輪對話數據,格式如下:", "## 資料欄位\n\n- 'conversation_id': (int64) 資料編號\n- 'category': (string) 對話類別\n- 'conversation': (list) 多輪對話\n - 'human': (string) 使用者角色的詢問\n - 'assistant': (string) 小助手的回答", "## 如何使用", "## 許可資訊\n\nCC BY-NC 4.0", "## 引用" ]
[ "TAGS\n#task_categories-conversational #size_categories-1M<n<10M #language-Chinese #license-cc #region-us \n", "# Dataset Card for \"moss-003-sft-chinese-zhtw\"", "## 資料集摘要\n\n本資料集主要是應用於專案:MOSS: 開源對話語言模型 所收集的數據。\n\nMOSS) 是支援中英雙語和多種外掛程式的開源對話語言模型,moss-moon 系列模型具有160億參數,在FP16精度下可在單張A100/A800或兩張3090顯示卡運行,在INT4/8精度下可在單張3090顯示卡運行。 MOSS基座語言模型在約七千億中英文以及程式碼單字上預訓練得到,後續經過對話指令微調、插件增強學習和人類偏好訓練具備多輪對話能力及使用多種插件的能力。", "## 原始資料來源\n\n- moss-003-sft-data: 'moss-moon-003-sft' 所使用的多輪對話數據,基於 MOSS-002 內測階段採集的約10萬用戶輸入數據和 gpt-3.5-turbo 構造而成,相比 'moss-002-sft-data','moss-003-sft-data' 更符合真實使用者意圖分佈,包含更細緻的有用性類別標記、更廣泛的無害性資料和更長對話輪數,約含110萬條對話數據。完整資料已全部開源。\n- YeungNLP/moss-003-sft-data", "## 資料下載清理\n\n在原始資料集的基礎上,YeungNLP/moss-003-sft-data 去除了冗餘訊息,僅提取出有效的對話訊息,並且調整資料格式,以便在訓練中更加靈活地組織資料格式。\n\n1. 下載 YeungNLP/moss-003-sft-data\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Huggingface Datasets 來上傳至 Huggingface Hub", "## 資料集結構\n\n本資料集原始為jsonl格式,每一行的樣本為一個多輪對話數據,格式如下:", "## 資料欄位\n\n- 'conversation_id': (int64) 資料編號\n- 'category': (string) 對話類別\n- 'conversation': (list) 多輪對話\n - 'human': (string) 使用者角色的詢問\n - 'assistant': (string) 小助手的回答", "## 如何使用", "## 許可資訊\n\nCC BY-NC 4.0", "## 引用" ]
[ 38, 20, 151, 154, 103, 29, 74, 4, 9, 3 ]
[ "passage: TAGS\n#task_categories-conversational #size_categories-1M<n<10M #language-Chinese #license-cc #region-us \n# Dataset Card for \"moss-003-sft-chinese-zhtw\"## 資料集摘要\n\n本資料集主要是應用於專案:MOSS: 開源對話語言模型 所收集的數據。\n\nMOSS) 是支援中英雙語和多種外掛程式的開源對話語言模型,moss-moon 系列模型具有160億參數,在FP16精度下可在單張A100/A800或兩張3090顯示卡運行,在INT4/8精度下可在單張3090顯示卡運行。 MOSS基座語言模型在約七千億中英文以及程式碼單字上預訓練得到,後續經過對話指令微調、插件增強學習和人類偏好訓練具備多輪對話能力及使用多種插件的能力。## 原始資料來源\n\n- moss-003-sft-data: 'moss-moon-003-sft' 所使用的多輪對話數據,基於 MOSS-002 內測階段採集的約10萬用戶輸入數據和 gpt-3.5-turbo 構造而成,相比 'moss-002-sft-data','moss-003-sft-data' 更符合真實使用者意圖分佈,包含更細緻的有用性類別標記、更廣泛的無害性資料和更長對話輪數,約含110萬條對話數據。完整資料已全部開源。\n- YeungNLP/moss-003-sft-data## 資料下載清理\n\n在原始資料集的基礎上,YeungNLP/moss-003-sft-data 去除了冗餘訊息,僅提取出有效的對話訊息,並且調整資料格式,以便在訓練中更加靈活地組織資料格式。\n\n1. 下載 YeungNLP/moss-003-sft-data\n2. 使用 OpenCC 來進行簡繁轉換\n3. 使用 Huggingface Datasets 來上傳至 Huggingface Hub## 資料集結構\n\n本資料集原始為jsonl格式,每一行的樣本為一個多輪對話數據,格式如下:" ]
771f541c5b87b5d1a4d0b35a15e6029f2837590a
# Dataset Card for "emotional_response_spanish_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BrunoGR/emotional_response_spanish_dataset
[ "region:us" ]
2023-10-19T23:30:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "index", "dtype": "float64"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "Prompt_sp", "dtype": "string"}, {"name": "Prompt_mix", "dtype": "string"}, {"name": "Prompt_en", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 139130014, "num_examples": 41910}, {"name": "test", "num_bytes": 5047940, "num_examples": 1320}, {"name": "validation", "num_bytes": 8297080, "num_examples": 2220}], "download_size": 43129906, "dataset_size": 152475034}}
2023-11-21T06:47:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "emotional_response_spanish_dataset" More Information needed
[ "# Dataset Card for \"emotional_response_spanish_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"emotional_response_spanish_dataset\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"emotional_response_spanish_dataset\"\n\nMore Information needed" ]
2ec8a93b064695e0a3fd960fef9872560e66e594
# Dataset Card for "hard_captions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
iwecht/hard_captions
[ "region:us" ]
2023-10-19T23:34:59+00:00
{"dataset_info": {"features": [{"name": "annID", "dtype": "int64"}, {"name": "caption", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 364027, "num_examples": 5000}], "download_size": 200465, "dataset_size": 364027}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-19T23:35:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hard_captions" More Information needed
[ "# Dataset Card for \"hard_captions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hard_captions\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hard_captions\"\n\nMore Information needed" ]
36f29c5fbb8a2c28e2f46028fb03b1ad690d94da
# Data thông tin tổng quát về các trường - Số lượng: 616
H4438/education-university
[ "region:us" ]
2023-10-20T00:07:40+00:00
{}
2023-11-16T10:14:12+00:00
[]
[]
TAGS #region-us
# Data thông tin tổng quát về các trường - Số lượng: 616
[ "# Data thông tin tổng quát về các trường\n- Số lượng: 616" ]
[ "TAGS\n#region-us \n", "# Data thông tin tổng quát về các trường\n- Số lượng: 616" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Data thông tin tổng quát về các trường\n- Số lượng: 616" ]
c8bc0cbe69684da379af3d36424041162d63e702
# Dataset Card for "en-id-parallel-sentences" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carles-undergrad-thesis/en-id-parallel-sentences
[ "region:us" ]
2023-10-20T00:31:57+00:00
{"dataset_info": {"features": [{"name": "text_en", "dtype": "string"}, {"name": "text_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 392096944, "num_examples": 1000000}], "download_size": 204794393, "dataset_size": 392096944}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-20T00:32:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "en-id-parallel-sentences" More Information needed
[ "# Dataset Card for \"en-id-parallel-sentences\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"en-id-parallel-sentences\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"en-id-parallel-sentences\"\n\nMore Information needed" ]
3909333bc433404eba550ce2c789c9d1dfa3db90
# GPT Wiki Intro Extension

This dataset is an extension of aadityaubhat/GPT-wiki-intro: 1000 prompts processed through several LLMs without sampling (opt-125m, opt-1.3b, opt-2.7b, llama2-7b-chat, and llama2-13b-chat).

Schema for the dataset

|Column               |Datatype|Description                                |
|---------------------|--------|-------------------------------------------|
|id                   |int64   |ID from original dataset                   |
|generated            |string  |Model's output                             |
|source               |string  |opt-125m, opt-1.3b, opt-2.7b, llama2-7b or llama2-13b |

```
@misc {yatsy,
	author       = { {Kirill Safronov} },
	title        = { GPT-wiki-intro-extension },
	year         = 2023,
	url          = { https://huggingface.co/datasets/yatsy/GPT-wiki-intro-extension },
	publisher    = { Hugging Face }
}
```
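A minimal usage sketch follows; the Hub id, split name, and columns (`id`, `generated`, `source`) are taken from the schema above, and the rest is just one convenient way to inspect the data, not a prescribed workflow.

```python
# Illustrative only: count rows per generator model and peek at one continuation.
from collections import Counter
from datasets import load_dataset

ds = load_dataset("yatsy/GPT-wiki-intro-extension", split="train")

print(Counter(ds["source"]))          # expected keys: opt-125m, opt-1.3b, opt-2.7b, llama2-7b, llama2-13b
print(ds[0]["id"], ds[0]["generated"][:80])
```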
yatsy/GPT-wiki-intro-extension
[ "task_categories:text-classification", "annotations_creators:no-annotation", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:aadityaubhat/GPT-wiki-intro", "language:en", "license:cc", "facebook/opt-1.3b", "facebook/opt-2.7b", "facebook/opt-125m", "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf", "region:us" ]
2023-10-20T00:37:31+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["cc"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["aadityaubhat/GPT-wiki-intro"], "task_categories": ["text-classification"], "task_ids": [], "pretty_name": "GPT-wiki-intro-extension", "tags": ["facebook/opt-1.3b", "facebook/opt-2.7b", "facebook/opt-125m", "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "generated", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6375445, "num_examples": 5000}], "download_size": 2174490, "dataset_size": 6375445}}
2023-10-20T01:03:35+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-aadityaubhat/GPT-wiki-intro #language-English #license-cc #facebook/opt-1.3b #facebook/opt-2.7b #facebook/opt-125m #meta-llama/Llama-2-7b-chat-hf #meta-llama/Llama-2-13b-chat-hf #region-us
GPT Wiki Intro Extension
========================

This dataset is an extension of aadityaubhat/GPT-wiki-intro: 1000 prompts processed through several LLMs without sampling (opt-125m, opt-1.3b, opt-2.7b, llama2-7b-chat, and llama2-13b-chat).

Schema for the dataset

Column: id, Datatype: int64, Description: ID from original dataset
Column: generated, Datatype: string, Description: Model's output
Column: source, Datatype: string, Description: opt-125m, opt-1.3b, opt-2.7b, llama2-7b or llama2-13b
[]
[ "TAGS\n#task_categories-text-classification #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-aadityaubhat/GPT-wiki-intro #language-English #license-cc #facebook/opt-1.3b #facebook/opt-2.7b #facebook/opt-125m #meta-llama/Llama-2-7b-chat-hf #meta-llama/Llama-2-13b-chat-hf #region-us \n" ]
[ 146 ]
[ "passage: TAGS\n#task_categories-text-classification #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-aadityaubhat/GPT-wiki-intro #language-English #license-cc #facebook/opt-1.3b #facebook/opt-2.7b #facebook/opt-125m #meta-llama/Llama-2-7b-chat-hf #meta-llama/Llama-2-13b-chat-hf #region-us \n" ]
1e70d5ebb57c5c2682b764cf4218b19a4f3e33e6
# Dataset Card for "en-id-parallel-sentences-embedding" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carles-undergrad-thesis/en-id-parallel-sentences-embedding
[ "region:us" ]
2023-10-20T00:57:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text_en", "dtype": "string"}, {"name": "text_id", "dtype": "string"}, {"name": "target_embedding", "sequence": "float32"}, {"name": "input_ids_en", "sequence": "int64"}, {"name": "attention_mask_en", "sequence": "int64"}, {"name": "input_ids_id", "sequence": "int64"}, {"name": "attention_mask_id", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 11676096944, "num_examples": 1000000}], "download_size": 4112187708, "dataset_size": 11676096944}}
2023-10-20T01:02:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "en-id-parallel-sentences-embedding" More Information needed
[ "# Dataset Card for \"en-id-parallel-sentences-embedding\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"en-id-parallel-sentences-embedding\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"en-id-parallel-sentences-embedding\"\n\nMore Information needed" ]
f97338ea83aaf280982f4b955c25df37db650785
<p align="center"> <img src="https://i.ibb.co/WVkDGyW/image.png"/> </p> # Dataset card for batch ## Table of contents - [Dataset description](#dataset-description) - [Dataset summary](#dataset-summary) - [Dataset structure](#dataset-structure) - [Dataset instance](#dataset-instance) - [Dataset fields](#dataset-fields) ## Dataset description - **Homepage**: [batch homepage](https://huggingface.co/datasets/zeio/batch) - **Repository**: [batch repository](https://huggingface.co/datasets/zeio/batch) - **Point of contact**: [Zeio Nara](mailto:[email protected]) - **Dataset version**: `31.10.2023` ### Dataset summary This dataset contains threads parsed from the `/b/` board of [2ch archive][archive]. See dataset viewer at the [derivative repo](/datasets/zeio/auto-batch). **Examples of the dataset reading and usage are provided in [this colab notebook](https://colab.research.google.com/drive/1YOfxiTq6DXIVEaKwyA7TpcTjonaP_A8S?usp=sharing)**. ## Dataset structure The dataset is represented in three formats - **compressed**, **uncompressed** and **spoken**: 1. `uncompressed` representation is the default and simplest one - in this form the content of dataset is organised inside `txt` files which are grouped into clusters inside [`threads` folder](/datasets/zeio/batch/tree/main/threads). The grouping is done due to `git's` constraints, namely, because it's not possible to have more than 10000 files in a single directory. That's why each cluster contains 10000 items (except the last one, which *could* contain fewer elements). Each cluster name has the format `${START_PAGE}-${END_PAGE}`, where `${START_PAGE}` is the index of the first page in the [archive][archive] from which posts have been put into the cluster, and `${END_PAGE}` is the last such paget respectively; 1. `compressed` representation is slightly more sophisticated than the `uncompressed` one - in consists of a set of `tar.xz` files which are nothing more than **the compressed clusters** of `txt` files described above. This representation corresponds to the [`threads-compressed` folder](/datasets/zeio/batch/tree/main/threads-compressed); 1. `spoken` representation consists of `mp3` files with speech generated for **some threads using an alternating speaker voice pattern** meaning that the 1st post is said by the first speaker, the 2nd post is said by the second speaker, the 3rd post is said by the first speaker, the 4th post is said by the second speaker and so on. The speech is generated automatically using a `TTS` engine. The `mp3` files are located in the [`threads-spoken-compressed`](/datasets/zeio/batch/tree/main/threads-spoken-compressed) and are grouped using `tar.xz` archives in the same way as `txt` files in the [`compressed` dataset representation](/datasets/zeio/batch/tree/main/threads-compressed). Concerning particular `txt` files under `threads/\*/` folder, each item here corresponds to **one thread** and is organised as follows: 1. Each non-empty line corresponds to a single post from a user; 1. If a non-empty line follows another non-empty line, then it should be treated as a **comment** to one of the posts above it, a **response** to a request above or as an **answer** to a question; 1. If a non-empty line follows an empty line, it should be treated as a beginning of a discussion or a topic. Therefore, the dataset consists of **threads**, which can be separated into **topics**, which, in turn, consist of **posts**. 
Posts are the lowermost units in the dataset and are not divided further - they should be interpreted as a plain text. ### Dataset instance The following code snippet contains text for the thread `0000-0019/119540414`: ```sh Всем привет. Нужна помощь богов фотошопа, на картинке надо изменить дату на 09/03/2016 и значения тесто на 86.500++ черес код елемента ебаш Опять ты, сука ебаная? Хули тебе опять надо? СПАСИБО Размер шрифта не совпадает, але. ``` This thread consists of two topics, the first one of which includes 3 posts, and the second - 2 posts. Therefore, this dataset entry can be represented in json in the following format: ```sh { "title": "Всем привет. Нужна помощь богов фотошопа, на картинке надо изменить дату на 09/03/2016 и значения тесто на 86.500++", "topics": [ { "posts": [ { "text": "Всем привет. Нужна помощь богов фотошопа, на картинке надо изменить дату на 09/03/2016 и значения тесто на 86.500++" }, { "text": "черес код елемента ебаш" }, { "text": "Опять ты, сука ебаная? Хули тебе опять надо?" } ] }, { "posts": [ { "text": "СПАСИБО" }, { "text": "Размер шрифта не совпадает, але." } ] } ] } ``` ### Dataset fields In `written` configuration the dataset is represented as a list of `Thread` objects, each `Thread` has a single property `topics`, which contains a list of `Topic` objects. Each `Topic` object has a single property `posts`, which points to the list of `Post` objects, making up the `Topic`. Each `Post` object contains a single property `text` which contains text representation of the post (essentially `text` is `html` code without `tags` and explicit links to other posts; there may still be implicit links to other posts in a form of quotes, prefixed with `>` symbol). As an additional field, each instance has a property `title` which is equivalent to the thread's main post content. In `spoken` configuration the structure is basically the same, but some `Thread` objects have and additional property `speech` with a spoken representation of the thread. [archive]: https://2ch.hk/b/arch/
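For concreteness, the line-based format described above can be parsed with a few lines of code. The sketch below is only an illustration: it assumes a local copy of an uncompressed cluster (the path shown is hypothetical, mirroring the cluster layout described earlier) and follows the thread → topics → posts shape of the JSON example.

```python
# Sketch of the parsing rules stated above: every non-empty line is a post, a blank
# line closes the current topic, and the thread title is taken from the first post.
from pathlib import Path

def parse_thread(path: str) -> dict:
    topics, current = [], []
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        if line.strip():                      # a post (topic opener or follow-up)
            current.append({"text": line.strip()})
        elif current:                         # blank line ends the running topic
            topics.append({"posts": current})
            current = []
    if current:                               # flush the last topic
        topics.append({"posts": current})
    title = topics[0]["posts"][0]["text"] if topics else ""
    return {"title": title, "topics": topics}

# Hypothetical local file following the layout described above:
# thread = parse_thread("threads/0000-0019/119540414.txt")
```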
zeio/batch
[ "task_categories:text-generation", "task_categories:text-classification", "task_categories:question-answering", "language_creators:crowdsourced", "size_categories:100K<n<1M", "language:ru", "language:en", "license:apache-2.0", "social-networks", "not-for-all-audiences", "region:us" ]
2023-10-20T01:12:50+00:00
{"language_creators": ["crowdsourced"], "language": ["ru", "en"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation", "text-classification", "question-answering"], "pretty_name": "batch", "tags": ["social-networks", "not-for-all-audiences"], "annotation_creators": ["crowdsourced"], "dataset_info": [{"config_name": "written", "features": [{"name": "title", "dtype": "string"}, {"name": "topics", "sequence": [{"name": "posts", "sequence": [{"name": "text", "dtype": "string"}]}]}]}, {"config_name": "spoken", "features": [{"name": "title", "dtype": "string"}, {"name": "speech", "dtype": "audio"}, {"name": "topics", "sequence": [{"name": "posts", "sequence": [{"name": "text", "dtype": "string"}]}]}]}]}
2023-12-13T21:19:54+00:00
[]
[ "ru", "en" ]
TAGS #task_categories-text-generation #task_categories-text-classification #task_categories-question-answering #language_creators-crowdsourced #size_categories-100K<n<1M #language-Russian #language-English #license-apache-2.0 #social-networks #not-for-all-audiences #region-us
<p align="center"> <img src="https://i.URL </p> # Dataset card for batch ## Table of contents - Dataset description - Dataset summary - Dataset structure - Dataset instance - Dataset fields ## Dataset description - Homepage: batch homepage - Repository: batch repository - Point of contact: Zeio Nara - Dataset version: '31.10.2023' ### Dataset summary This dataset contains threads parsed from the '/b/' board of [2ch archive][archive]. See dataset viewer at the derivative repo. Examples of the dataset reading and usage are provided in this colab notebook. ## Dataset structure The dataset is represented in three formats - compressed, uncompressed and spoken: 1. 'uncompressed' representation is the default and simplest one - in this form the content of dataset is organised inside 'txt' files which are grouped into clusters inside 'threads' folder. The grouping is done due to 'git's' constraints, namely, because it's not possible to have more than 10000 files in a single directory. That's why each cluster contains 10000 items (except the last one, which *could* contain fewer elements). Each cluster name has the format '${START_PAGE}-${END_PAGE}', where '${START_PAGE}' is the index of the first page in the [archive][archive] from which posts have been put into the cluster, and '${END_PAGE}' is the last such paget respectively; 1. 'compressed' representation is slightly more sophisticated than the 'uncompressed' one - in consists of a set of 'URL' files which are nothing more than the compressed clusters of 'txt' files described above. This representation corresponds to the 'threads-compressed' folder; 1. 'spoken' representation consists of 'mp3' files with speech generated for some threads using an alternating speaker voice pattern meaning that the 1st post is said by the first speaker, the 2nd post is said by the second speaker, the 3rd post is said by the first speaker, the 4th post is said by the second speaker and so on. The speech is generated automatically using a 'TTS' engine. The 'mp3' files are located in the 'threads-spoken-compressed' and are grouped using 'URL' archives in the same way as 'txt' files in the 'compressed' dataset representation. Concerning particular 'txt' files under 'threads/\*/' folder, each item here corresponds to one thread and is organised as follows: 1. Each non-empty line corresponds to a single post from a user; 1. If a non-empty line follows another non-empty line, then it should be treated as a comment to one of the posts above it, a response to a request above or as an answer to a question; 1. If a non-empty line follows an empty line, it should be treated as a beginning of a discussion or a topic. Therefore, the dataset consists of threads, which can be separated into topics, which, in turn, consist of posts. Posts are the lowermost units in the dataset and are not divided further - they should be interpreted as a plain text. ### Dataset instance The following code snippet contains text for the thread '0000-0019/119540414': This thread consists of two topics, the first one of which includes 3 posts, and the second - 2 posts. Therefore, this dataset entry can be represented in json in the following format: ### Dataset fields In 'written' configuration the dataset is represented as a list of 'Thread' objects, each 'Thread' has a single property 'topics', which contains a list of 'Topic' objects. Each 'Topic' object has a single property 'posts', which points to the list of 'Post' objects, making up the 'Topic'. 
Each 'Post' object contains a single property 'text' which contains text representation of the post (essentially 'text' is 'html' code without 'tags' and explicit links to other posts; there may still be implicit links to other posts in a form of quotes, prefixed with '>' symbol). As an additional field, each instance has a property 'title' which is equivalent to the thread's main post content. In 'spoken' configuration the structure is basically the same, but some 'Thread' objects have and additional property 'speech' with a spoken representation of the thread. [archive]: URL
[ "# Dataset card for batch", "## Table of contents\n\n- Dataset description\n - Dataset summary\n- Dataset structure\n - Dataset instance\n - Dataset fields", "## Dataset description\n\n- Homepage: batch homepage\n- Repository: batch repository\n- Point of contact: Zeio Nara\n- Dataset version: '31.10.2023'", "### Dataset summary\n\nThis dataset contains threads parsed from the '/b/' board of [2ch archive][archive]. See dataset viewer at the derivative repo. Examples of the dataset reading and usage are provided in this colab notebook.", "## Dataset structure\n\nThe dataset is represented in three formats - compressed, uncompressed and spoken:\n\n1. 'uncompressed' representation is the default and simplest one - in this form the content of dataset is organised inside 'txt' files which are grouped into clusters inside 'threads' folder. The grouping is done due to 'git's' constraints, namely, because it's not possible to have more than 10000 files in a single directory. That's why each cluster contains 10000 items (except the last one, which *could* contain fewer elements). Each cluster name has the format '${START_PAGE}-${END_PAGE}', where '${START_PAGE}' is the index of the first page in the [archive][archive] from which posts have been put into the cluster, and '${END_PAGE}' is the last such paget respectively;\n1. 'compressed' representation is slightly more sophisticated than the 'uncompressed' one - in consists of a set of 'URL' files which are nothing more than the compressed clusters of 'txt' files described above. This representation corresponds to the 'threads-compressed' folder;\n1. 'spoken' representation consists of 'mp3' files with speech generated for some threads using an alternating speaker voice pattern meaning that the 1st post is said by the first speaker, the 2nd post is said by the second speaker, the 3rd post is said by the first speaker, the 4th post is said by the second speaker and so on. The speech is generated automatically using a 'TTS' engine. The 'mp3' files are located in the 'threads-spoken-compressed' and are grouped using 'URL' archives in the same way as 'txt' files in the 'compressed' dataset representation.\n\nConcerning particular 'txt' files under 'threads/\\*/' folder, each item here corresponds to one thread and is organised as follows:\n\n1. Each non-empty line corresponds to a single post from a user;\n1. If a non-empty line follows another non-empty line, then it should be treated as a comment to one of the posts above it, a response to a request above or as an answer to a question;\n1. If a non-empty line follows an empty line, it should be treated as a beginning of a discussion or a topic.\n\nTherefore, the dataset consists of threads, which can be separated into topics, which, in turn, consist of posts. Posts are the lowermost units in the dataset and are not divided further - they should be interpreted as a plain text.", "### Dataset instance\n\nThe following code snippet contains text for the thread '0000-0019/119540414':\n\n\n\nThis thread consists of two topics, the first one of which includes 3 posts, and the second - 2 posts.\n\nTherefore, this dataset entry can be represented in json in the following format:", "### Dataset fields\n\nIn 'written' configuration the dataset is represented as a list of 'Thread' objects, each 'Thread' has a single property 'topics', which contains a list of 'Topic' objects. Each 'Topic' object has a single property 'posts', which points to the list of 'Post' objects, making up the 'Topic'. 
Each 'Post' object contains a single property 'text' which contains text representation of the post (essentially 'text' is 'html' code without 'tags' and explicit links to other posts; there may still be implicit links to other posts in a form of quotes, prefixed with '>' symbol). As an additional field, each instance has a property 'title' which is equivalent to the thread's main post content. \nIn 'spoken' configuration the structure is basically the same, but some 'Thread' objects have and additional property 'speech' with a spoken representation of the thread.\n\n[archive]: URL" ]
[ "TAGS\n#task_categories-text-generation #task_categories-text-classification #task_categories-question-answering #language_creators-crowdsourced #size_categories-100K<n<1M #language-Russian #language-English #license-apache-2.0 #social-networks #not-for-all-audiences #region-us \n", "# Dataset card for batch", "## Table of contents\n\n- Dataset description\n - Dataset summary\n- Dataset structure\n - Dataset instance\n - Dataset fields", "## Dataset description\n\n- Homepage: batch homepage\n- Repository: batch repository\n- Point of contact: Zeio Nara\n- Dataset version: '31.10.2023'", "### Dataset summary\n\nThis dataset contains threads parsed from the '/b/' board of [2ch archive][archive]. See dataset viewer at the derivative repo. Examples of the dataset reading and usage are provided in this colab notebook.", "## Dataset structure\n\nThe dataset is represented in three formats - compressed, uncompressed and spoken:\n\n1. 'uncompressed' representation is the default and simplest one - in this form the content of dataset is organised inside 'txt' files which are grouped into clusters inside 'threads' folder. The grouping is done due to 'git's' constraints, namely, because it's not possible to have more than 10000 files in a single directory. That's why each cluster contains 10000 items (except the last one, which *could* contain fewer elements). Each cluster name has the format '${START_PAGE}-${END_PAGE}', where '${START_PAGE}' is the index of the first page in the [archive][archive] from which posts have been put into the cluster, and '${END_PAGE}' is the last such paget respectively;\n1. 'compressed' representation is slightly more sophisticated than the 'uncompressed' one - in consists of a set of 'URL' files which are nothing more than the compressed clusters of 'txt' files described above. This representation corresponds to the 'threads-compressed' folder;\n1. 'spoken' representation consists of 'mp3' files with speech generated for some threads using an alternating speaker voice pattern meaning that the 1st post is said by the first speaker, the 2nd post is said by the second speaker, the 3rd post is said by the first speaker, the 4th post is said by the second speaker and so on. The speech is generated automatically using a 'TTS' engine. The 'mp3' files are located in the 'threads-spoken-compressed' and are grouped using 'URL' archives in the same way as 'txt' files in the 'compressed' dataset representation.\n\nConcerning particular 'txt' files under 'threads/\\*/' folder, each item here corresponds to one thread and is organised as follows:\n\n1. Each non-empty line corresponds to a single post from a user;\n1. If a non-empty line follows another non-empty line, then it should be treated as a comment to one of the posts above it, a response to a request above or as an answer to a question;\n1. If a non-empty line follows an empty line, it should be treated as a beginning of a discussion or a topic.\n\nTherefore, the dataset consists of threads, which can be separated into topics, which, in turn, consist of posts. 
Posts are the lowermost units in the dataset and are not divided further - they should be interpreted as a plain text.", "### Dataset instance\n\nThe following code snippet contains text for the thread '0000-0019/119540414':\n\n\n\nThis thread consists of two topics, the first one of which includes 3 posts, and the second - 2 posts.\n\nTherefore, this dataset entry can be represented in json in the following format:", "### Dataset fields\n\nIn 'written' configuration the dataset is represented as a list of 'Thread' objects, each 'Thread' has a single property 'topics', which contains a list of 'Topic' objects. Each 'Topic' object has a single property 'posts', which points to the list of 'Post' objects, making up the 'Topic'. Each 'Post' object contains a single property 'text' which contains text representation of the post (essentially 'text' is 'html' code without 'tags' and explicit links to other posts; there may still be implicit links to other posts in a form of quotes, prefixed with '>' symbol). As an additional field, each instance has a property 'title' which is equivalent to the thread's main post content. \nIn 'spoken' configuration the structure is basically the same, but some 'Thread' objects have and additional property 'speech' with a spoken representation of the thread.\n\n[archive]: URL" ]
[ 95, 7, 26, 41, 60, 625, 70, 229 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text-classification #task_categories-question-answering #language_creators-crowdsourced #size_categories-100K<n<1M #language-Russian #language-English #license-apache-2.0 #social-networks #not-for-all-audiences #region-us \n# Dataset card for batch## Table of contents\n\n- Dataset description\n - Dataset summary\n- Dataset structure\n - Dataset instance\n - Dataset fields## Dataset description\n\n- Homepage: batch homepage\n- Repository: batch repository\n- Point of contact: Zeio Nara\n- Dataset version: '31.10.2023'### Dataset summary\n\nThis dataset contains threads parsed from the '/b/' board of [2ch archive][archive]. See dataset viewer at the derivative repo. Examples of the dataset reading and usage are provided in this colab notebook." ]
cb0436a0630d366e8dd4805484acde2c1a19fb33
# Dataset Card for "SECOND_KOWIKI_RETRIEVE_200_V2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jjonhwa/SECOND_KOWIKI_RETRIEVE_200_V2
[ "region:us" ]
2023-10-20T02:19:15+00:00
{"dataset_info": {"features": [{"name": "ctxs", "list": [{"name": "score", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 141924897, "num_examples": 15504}], "download_size": 75209045, "dataset_size": 141924897}}
2023-10-20T02:19:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SECOND_KOWIKI_RETRIEVE_200_V2" More Information needed
[ "# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_200_V2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_200_V2\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_200_V2\"\n\nMore Information needed" ]
3148b62d11cc5752e7dc5ac6dce08e0325d5573c
# Dataset Card for "SECOND_KOWIKI_RETRIEVE_300_V2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jjonhwa/SECOND_KOWIKI_RETRIEVE_300_V2
[ "region:us" ]
2023-10-20T02:19:44+00:00
{"dataset_info": {"features": [{"name": "ctxs", "list": [{"name": "score", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 189146280, "num_examples": 15504}], "download_size": 97933190, "dataset_size": 189146280}}
2023-10-20T02:19:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SECOND_KOWIKI_RETRIEVE_300_V2" More Information needed
[ "# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_300_V2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_300_V2\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"SECOND_KOWIKI_RETRIEVE_300_V2\"\n\nMore Information needed" ]
2460d3a56b7a767acbfa7b77fea879332f0082d6
### Dataset Description A dataset built by crawling the legal aid counseling web pages of [법률구조공단](https://www.klac.or.kr/) (the Korea Legal Aid Corporation).
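The card does not document split or column names, so the sketch below only inspects whatever the published data files expose; it is an assumption-laden illustration rather than a verified example.

```python
from datasets import load_dataset

ds = load_dataset("jihye-moon/klac_legal_aid_counseling")
print(ds)                          # available splits and their column names

split = next(iter(ds.values()))    # first split, whatever it is named
print(split.column_names)
print(split[0])                    # one crawled counseling example
```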
jihye-moon/klac_legal_aid_counseling
[ "task_categories:conversational", "task_categories:text-classification", "size_categories:1K<n<10K", "language:ko", "le", "region:us" ]
2023-10-20T02:26:51+00:00
{"language": ["ko"], "size_categories": ["1K<n<10K"], "task_categories": ["conversational", "text-classification"], "tags": ["le"]}
2023-10-20T02:49:28+00:00
[]
[ "ko" ]
TAGS #task_categories-conversational #task_categories-text-classification #size_categories-1K<n<10K #language-Korean #le #region-us
### Dataset Description A dataset built by crawling the legal aid counseling web pages of 법률구조공단 (the Korea Legal Aid Corporation).
[ "### Dataset Description\n\n\n\n법률구조공단의 법률구조상담 웹페이지를 크롤링하여 구축한 데이터셋 입니다." ]
[ "TAGS\n#task_categories-conversational #task_categories-text-classification #size_categories-1K<n<10K #language-Korean #le #region-us \n", "### Dataset Description\n\n\n\n법률구조공단의 법률구조상담 웹페이지를 크롤링하여 구축한 데이터셋 입니다." ]
[ 46, 25 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-classification #size_categories-1K<n<10K #language-Korean #le #region-us \n### Dataset Description\n\n\n\n법률구조공단의 법률구조상담 웹페이지를 크롤링하여 구축한 데이터셋 입니다." ]
a420c5d8a0377ef395bec7ea15a7a118c187747d
# Dataset Card for "GraySpectrogram2" ## Dataset info: * ```mb23/GraySpectrogram```で使用できるサブセットのみから画像とキャプションだけの組にしたもの。 ## How to use dataset: ```python (!pip install datasets) import datasets from datasets import load_dataset dataset = load_datasets("mb23/GraySpectrogram") ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mb23/GraySpectrogram2
[ "region:us" ]
2023-10-20T02:37:18+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1063161338.75, "num_examples": 9258}, {"name": "test", "num_bytes": 982933207.75, "num_examples": 8722}], "download_size": 2041757562, "dataset_size": 2046094546.5}}
2023-10-20T02:55:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GraySpectrogram2" ## Dataset info: * で使用できるサブセットのみから画像とキャプションだけの組にしたもの。 ## How to use dataset: More Information needed
[ "# Dataset Card for \"GraySpectrogram2\"", "## Dataset info:\n* で使用できるサブセットのみから画像とキャプションだけの組にしたもの。", "## How to use dataset:\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GraySpectrogram2\"", "## Dataset info:\n* で使用できるサブセットのみから画像とキャプションだけの組にしたもの。", "## How to use dataset:\n\nMore Information needed" ]
[ 6, 14, 25, 10 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GraySpectrogram2\"## Dataset info:\n* で使用できるサブセットのみから画像とキャプションだけの組にしたもの。## How to use dataset:\n\nMore Information needed" ]
949daa3c7718ab1d08115d14f316aaffb1cb9916
<div style='background: #ffeef1; border: 1px solid #fd91a4; padding:1em; border-radius:3px; margin-bottom:2em;'> <h3 style='margin:0'>NSFW</h3> <p style='margin:0'>This dataset is not suitable for use by minors. The dataset contains X-rated/NSFW content.</p> </div> <div style='background: #eefff1; border: 1px solid #a4fd91; padding:1em; border-radius:3px; margin-bottom:2em;'> <h3 style='margin:0'>For Finetuning Only</h3> <p style='margin:0'>Unless you are running a finetuning run, you should use the <a href="https://huggingface.co/datasets/hearmeneigh/e621-rising-v3-curated">curated V3 dataset</a>.</p> </div>
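For completeness, a hedged loading sketch based on the columns listed in the repository metadata ('image', 'tags', 'text', 'url', ...); streaming is used only to avoid downloading the full split, which is roughly 8 GB according to the metadata, and the snippet is illustrative rather than official.

```python
from datasets import load_dataset

ds = load_dataset("hearmeneigh/e621-rising-v3-finetuner", split="train", streaming=True)
first = next(iter(ds))
print(first["text"])        # caption-style text field
print(first["tags"][:10])   # first few entries from the tag sequence
```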
hearmeneigh/e621-rising-v3-finetuner
[ "anthro", "furry", "e621", "nsfw", "not-for-all-audiences", "region:us" ]
2023-10-20T02:46:13+00:00
{"dataset_info": {"features": [{"name": "source_id", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "tags", "sequence": "string"}, {"name": "url", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "selector", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8063277865.625, "num_examples": 41099}], "download_size": 8015931448, "dataset_size": 8063277865.625}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["anthro", "furry", "e621", "nsfw", "not-for-all-audiences"]}
2023-10-22T20:36:54+00:00
[]
[]
TAGS #anthro #furry #e621 #nsfw #not-for-all-audiences #region-us
<div style='background: #ffeef1; border: 1px solid #fd91a4; padding:1em; border-radius:3px; margin-bottom:2em;'> <h3 style='margin:0'>NSFW</h3> <p style='margin:0'>This dataset is not suitable for use by minors. The dataset contains X-rated/NSFW content.</p> </div> <div style='background: #eefff1; border: 1px solid #a4fd91; padding:1em; border-radius:3px; margin-bottom:2em;'> <h3 style='margin:0'>For Finetuning Only</h3> <p style='margin:0'>Unless you are running a finetuning run, you should use the <a href="URL V3 dataset</a>.</p> </div>
[]
[ "TAGS\n#anthro #furry #e621 #nsfw #not-for-all-audiences #region-us \n" ]
[ 29 ]
[ "passage: TAGS\n#anthro #furry #e621 #nsfw #not-for-all-audiences #region-us \n" ]
3b9859020a79a668eb3356a32a0106c73d84de01
# Dataset Card for "github-issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Sober-Clever/github-issues
[ "region:us" ]
2023-10-20T02:51:08+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "assignees", "list": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "milestone", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "description", "dtype": 
"string"}, {"name": "creator", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "open_issues", "dtype": "int64"}, {"name": "closed_issues", "dtype": "int64"}, {"name": "state", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "due_on", "dtype": "null"}, {"name": "closed_at", "dtype": "null"}]}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "closed_at", "dtype": "timestamp[s]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "null"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 1420461, "num_examples": 100}], "download_size": 513444, "dataset_size": 1420461}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-20T02:51:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "github-issues" More Information needed
[ "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"github-issues\"\n\nMore Information needed" ]