| Column | Type | Min length | Max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | list | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | list | 0 | 25 |
| languages | list | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | list | 0 | 352 |
| processed_texts | list | 1 | 353 |
| tokens_length | list | 1 | 353 |
| input_texts | list | 1 | 40 |
e17ebf7917c06b3d40a2446ad062929af76cb6cb
Homepage: https://github.com/GGLAB-KU/turkish-plu/
mcemilg/turkish-plu-step-ordering
[ "task_categories:text-classification", "size_categories:100K<n<1M", "language:tr", "region:us" ]
2023-12-22T12:18:58+00:00
{"language": ["tr"], "size_categories": ["100K<n<1M"], "task_categories": ["text-classification"]}
2023-12-25T17:31:15+00:00
[]
[ "tr" ]
0ebf43f4bf4d6c7e96e3fbe6974c8e4c40fba1ab
Homepage: https://github.com/GGLAB-KU/turkish-plu
mcemilg/turkish-plu-next-event-prediction
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:tr", "region:us" ]
2023-12-22T12:23:02+00:00
{"language": ["tr"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"]}
2023-12-22T12:24:37+00:00
[]
[ "tr" ]
069ae6222aaf825bf42d06cddf2de2aba81bfe15
# Description

Guanaco dataset subsets used for experiments in the paper [Turning English-centric LLMs Into Polyglots: How Much Multilinguality Is Needed?](https://arxiv.org/abs/2312.12683)

We extend the original Guanaco dataset with language tags, with languages identified using [OpenLID](https://github.com/laurieburchell/open-lid-dataset).

The following subsets were used to train our experimental models:

| config name | languages |
|-------------|-----------|
| ml1 | en |
| ml2, mtml2 | en, es |
| ml3, mtml3 | en, es, ru |
| ml4, mtml4 | en, es, ru, de |
| ml5, mtml5 | en, es, ru, de, zh |
| ml6, mtml6 | en, es, ru, de, zh, fr |
| guanaco | en, es, ru, de, zh, fr, ca, th, pt, it, uk, eu, jp + many more |

Note:

- `ml` indicates that the non-English examples are taken directly from OpenAssistant and are assumed to be native non-English.
- `mtml` indicates that the non-English examples are translated from the original English ones using `gpt-3.5-turbo-16k`.

# Usage

```python
from datasets import load_dataset

ds = load_dataset('ZurichNLP/mlit-guanaco', 'ml2')
print(ds)
>>> DatasetDict({
    train: Dataset({
        features: ['text', 'lang', 'confidence', 'id'],
        num_rows: 3200
    })
    test: Dataset({
        features: ['text', 'lang', 'confidence', 'id'],
        num_rows: 518
    })
})
```

# Citation

```
@misc{dettmers2023qlora,
      title={QLoRA: Efficient Finetuning of Quantized LLMs},
      author={Tim Dettmers and Artidoro Pagnoni and Ari Holtzman and Luke Zettlemoyer},
      year={2023},
      eprint={2305.14314},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
```

```
@misc{kew2023turning,
      title={Turning English-centric LLMs Into Polyglots: How Much Multilinguality Is Needed?},
      author={Tannon Kew and Florian Schottmann and Rico Sennrich},
      year={2023},
      eprint={2312.12683},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
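Because every example carries a `lang` tag and an OpenLID `confidence` score, the configs are easy to slice further. A minimal sketch (the confidence threshold is an arbitrary choice for illustration, not something used in the paper):

```python
from collections import Counter

from datasets import load_dataset

# Load the full 'guanaco' config, which mixes many languages.
ds = load_dataset("ZurichNLP/mlit-guanaco", "guanaco", split="train")

# Count training examples per OpenLID language tag.
print(Counter(ds["lang"]).most_common(10))

# Keep only examples whose language ID is reasonably confident.
# The 0.9 threshold is an arbitrary illustrative choice.
confident = ds.filter(lambda ex: ex["confidence"] >= 0.9)
print(f"{len(confident)} of {len(ds)} examples pass the threshold")
```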
ZurichNLP/mlit-guanaco
[ "arxiv:2312.12683", "arxiv:2305.14314", "region:us" ]
2023-12-22T12:39:23+00:00
{"dataset_info": [{"config_name": "guanaco", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 16084303, "num_examples": 9846}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 9851098, "dataset_size": 16938773}, {"config_name": "ml1", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5768604, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3725469, "dataset_size": 6623074}, {"config_name": "ml2", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5710678, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3697276, "dataset_size": 6565148}, {"config_name": "ml3", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5869588, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3740699, "dataset_size": 6724058}, {"config_name": "ml4", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5793432, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3704436, "dataset_size": 6647902}, {"config_name": "ml5", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5727523, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3676576, "dataset_size": 6581993}, {"config_name": "ml6", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5617451, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3625589, "dataset_size": 6471921}, {"config_name": "mtml2", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5813848, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3717965, "dataset_size": 6668318}, {"config_name": "mtml3", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6105089, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3823447, "dataset_size": 6959559}, {"config_name": "mtml4", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", 
"dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6155284, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3848827, "dataset_size": 7009754}, {"config_name": "mtml5", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6104544, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3836022, "dataset_size": 6959014}, {"config_name": "mtml6", "features": [{"name": "text", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "confidence", "dtype": "float64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6174923, "num_examples": 3200}, {"name": "test", "num_bytes": 854470, "num_examples": 518}], "download_size": 3859467, "dataset_size": 7029393}], "configs": [{"config_name": "guanaco", "data_files": [{"split": "train", "path": "guanaco/train-*"}, {"split": "test", "path": "guanaco/test-*"}]}, {"config_name": "ml1", "data_files": [{"split": "train", "path": "ml1/train-*"}, {"split": "test", "path": "ml1/test-*"}]}, {"config_name": "ml2", "data_files": [{"split": "train", "path": "ml2/train-*"}, {"split": "test", "path": "ml2/test-*"}]}, {"config_name": "ml3", "data_files": [{"split": "train", "path": "ml3/train-*"}, {"split": "test", "path": "ml3/test-*"}]}, {"config_name": "ml4", "data_files": [{"split": "train", "path": "ml4/train-*"}, {"split": "test", "path": "ml4/test-*"}]}, {"config_name": "ml5", "data_files": [{"split": "train", "path": "ml5/train-*"}, {"split": "test", "path": "ml5/test-*"}]}, {"config_name": "ml6", "data_files": [{"split": "train", "path": "ml6/train-*"}, {"split": "test", "path": "ml6/test-*"}]}, {"config_name": "mtml2", "data_files": [{"split": "train", "path": "mtml2/train-*"}, {"split": "test", "path": "mtml2/test-*"}]}, {"config_name": "mtml3", "data_files": [{"split": "train", "path": "mtml3/train-*"}, {"split": "test", "path": "mtml3/test-*"}]}, {"config_name": "mtml4", "data_files": [{"split": "train", "path": "mtml4/train-*"}, {"split": "test", "path": "mtml4/test-*"}]}, {"config_name": "mtml5", "data_files": [{"split": "train", "path": "mtml5/train-*"}, {"split": "test", "path": "mtml5/test-*"}]}, {"config_name": "mtml6", "data_files": [{"split": "train", "path": "mtml6/train-*"}, {"split": "test", "path": "mtml6/test-*"}]}]}
2023-12-22T13:40:36+00:00
[ "2312.12683", "2305.14314" ]
[]
5f9694085523f2abe4fb70e20aaab9252cfe7461
# ConvNTM

This repository contains supplemental data files for our AAAI 2023 paper, **"[ConvNTM: Conversational Neural Topic Model](https://ojs.aaai.org/index.php/AAAI/article/view/26595)"**.

For the implementation code of **ConvNTM**, please visit https://github.com/ssshddd/ConvNTM/.

To use the provided data files, follow these steps:

1. Place the `dd_co_matrix.pt` file at the following path: `processed_data/dailydialogues_ulen150_unum_25/co_matrix.pt`.
2. Place the `emp_co_matrix.pt` file at the following path: `processed_data/emp_ulen150_unum_8/co_matrix.pt`.

These data files are required for running the **ConvNTM** model on the respective datasets.
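The `.pt` extension suggests the files were saved with `torch.save`; assuming that, a quick sanity check that the files are in place might look like this (paths follow the steps above):

```python
import torch

# Expected locations after completing steps 1 and 2 above.
paths = [
    "processed_data/dailydialogues_ulen150_unum_25/co_matrix.pt",
    "processed_data/emp_ulen150_unum_8/co_matrix.pt",
]

for path in paths:
    # torch.load restores whatever object was saved with torch.save;
    # we assume a tensor-like co-occurrence matrix here.
    co_matrix = torch.load(path, map_location="cpu")
    print(path, getattr(co_matrix, "shape", type(co_matrix)))
```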
ssshddd/ConvNTM
[ "region:us" ]
2023-12-22T12:56:31+00:00
{}
2023-12-22T14:40:57+00:00
[]
[]
aef11a801e57e50620ab1c4a989af935771d60a8
# Dataset Card for "global230k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mespinosami/global230k
[ "region:us" ]
2023-12-22T13:08:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 8623524876.02, "num_examples": 162940}, {"name": "validation", "num_bytes": 1335636495.768, "num_examples": 23416}, {"name": "test", "num_bytes": 2572452087.661, "num_examples": 46463}], "download_size": 10844373816, "dataset_size": 12531613459.449}}
2023-12-22T21:52:22+00:00
[]
[]
b842ab8ff1b2130ef2b186ed4f32a4fd76e6abbb
# Dataset Card for "fashion_image_caption-100-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
PardeepRassani/fashion_image_caption-100-v2
[ "region:us" ]
2023-12-22T14:10:24+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22820471.0, "num_examples": 100}], "download_size": 22820373, "dataset_size": 22820471.0}}
2023-12-22T14:10:36+00:00
[]
[]
78764037f3c44ac0cd146c2ede5bbf716c456b72
# Dataset Card for "wikipedia_simple_20231201" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
teddy-f-47/wikipedia_simple_20231201
[ "region:us" ]
2023-12-22T14:36:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 293060898, "num_examples": 243561}], "download_size": 157875548, "dataset_size": 293060898}}
2023-12-22T17:36:25+00:00
[]
[]
85db2519caddb5efa361e9402068e6dd63bbf514
# VQAonline

<img src="https://cdn-uploads.huggingface.co/production/uploads/6337e9b676421c05430a0287/6vt42q8w7EWx9vVuZqc3U.png" width="50%">

[**🌐 Homepage**](https://vqaonline.github.io/) | [**🤗 Dataset**](https://huggingface.co/datasets/ChongyanChen/VQAonline/) | [**📖 arXiv**](https://arxiv.org/abs/2311.15562)

## Dataset Description

We introduce VQAonline, the first VQA dataset in which all contents originate from an authentic use case. VQAonline includes 64K visual questions sourced from an online question answering community (i.e., StackExchange). It differs from prior datasets in that it contains:

- (1) authentic context that clarifies the question,
- (2) an answer that the individual asking the question validated as acceptable among all community-provided answers,
- (3) answers that are considerably longer (e.g., a mean of 173 words versus typically 11 words or fewer in prior work), and
- (4) user-chosen topics for each visual question from 105 diverse topics, revealing the dataset’s inherent diversity.

## Dataset Structure

In total, the VQAonline dataset contains 64,696 visual questions.

We designed VQAonline to support few-shot settings, given the recent exciting developments around in-context few-shot learning with foundation models. Thus, we split the dataset as follows:

- Training set: 665 visual questions
- Validation set: 285 visual questions
- Test set: 63,746 visual questions

The questions, contexts, and answers are provided in the JSON files.

Due to Hugging Face constraints, we separate the image files into 7 folders (named images1 to images7), each of which contains 10,000 image files, except for folder "images7".

## Contact
- Chongyan Chen: [email protected]

## Citation

**BibTeX:**
```bibtex
@article{chen2023vqaonline,
  title={Fully Authentic Visual Question Answering Dataset from Online Communities},
  author={Chen, Chongyan and Liu, Mengchen and Codella, Noel and Li, Yunsheng and Yuan, Lu and Gurari, Danna},
  journal={arXiv preprint arXiv:2311.15562},
  year={2023}
}
```
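Because the images are sharded across seven folders, pairing a question with its image file takes a small lookup. A sketch under stated assumptions: the card does not document the JSON field names, so the keys `image` and `question` below are illustrative placeholders:

```python
import json
from pathlib import Path
from typing import Optional

def find_image(filename: str, root: Path = Path(".")) -> Optional[Path]:
    """Search the seven shard folders (images1 ... images7) for an image file."""
    for i in range(1, 8):
        candidate = root / f"images{i}" / filename
        if candidate.exists():
            return candidate
    return None

# The split filenames follow the card's train/validation/test description;
# the per-example keys below are illustrative placeholders.
with open("train.json") as f:
    examples = json.load(f)

for ex in examples[:5]:
    print(ex["question"], "->", find_image(ex["image"]))
```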
ChongyanChen/VQAonline
[ "task_categories:visual-question-answering", "license:cc-by-sa-4.0", "arxiv:2311.15562", "region:us" ]
2023-12-22T15:00:02+00:00
{"license": "cc-by-sa-4.0", "task_categories": ["visual-question-answering"], "pretty_name": "VQAonline"}
2023-12-26T09:13:34+00:00
[ "2311.15562" ]
[]
644c1763fc3d4de1f25f938a9eafcca5e61cfa32
# **⚖️ Ontario Laws & Regs ⚖️**

The Ontario Laws & Regs dataset contains 5,096 Ontario laws and regulations, consisting of the most recent version of all current and revoked laws and regulations.

The dataset is distributed under the MIT license and is intended to facilitate ML and data tasks involving Ontario legislation.

In addition, a scraper is provided that is capable of capturing different configurations of the data directly from the Ontario eLaws website, found [here](https://github.com/hordruma/elaws_scraper).

## Structure 🏛️

Each law and regulation is stored in its own JSON file, which contains the following fields and sub-fields (see the reader sketch below):

1. `act_info`/`reg_info`: Act/Reg info, including:
   - `full_title`: Full title
   - `act_name_text`/`reg_name_text`: Act/Reg name text
   - `citation`: Citation
   - `act_under`: For regulations only, the parent act
   - `url`: URL
   - `date_scraped`: Date scraped
2. `copyright`: Crown copyright blurb, as required by the eLaws terms of service
3. `versions`: A list of versions, each containing:
   - `a_href`: URL slug for the given version
   - `valid_from`: Valid from
   - `valid_to`: Valid to
4. `current_regs`: Current regulations (for statutes), a list of associated regulations, each containing:
   - `a_href`: URL slug for the given regulation
   - `Citation`: Citation
   - `title`: Title
5. `revoked_regs`: Revoked regulations (for statutes), a list of associated revoked regulations, each containing:
   - `revoked_reg_a_href`: URL slug for the given revoked regulation
   - `revoked_reg_citation`: Citation
   - `revoked_reg_title`: Title
6. `content`: Contents of the act or regulation, a list of sections, each containing:
   - `id`: Section number
   - `section`: Section title
   - `content`: Section content, as UTF-8 text
   - `raw_html`: Raw HTML of the section content, containing extra features

## Collection 📥

Documents were sourced from the [Ontario eLaws website](https://www.ontario.ca/laws). The [eLaws Scraper](https://github.com/hordruma/elaws_scraper/) was used to extract the documents and parse them to JSON.

## Licence 📜

Both the dataset and its associated scraper are distributed under the MIT license.
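As a concrete example of the layout above, a minimal reader for a single document might look like this (the filename is illustrative; every act and regulation ships as its own JSON file):

```python
import json

# Illustrative filename; substitute any act or regulation from the dataset.
with open("some_act.json", encoding="utf-8") as f:
    law = json.load(f)

# Statutes carry 'act_info'; regulations carry 'reg_info' instead.
info = law.get("act_info") or law.get("reg_info")
print(info["full_title"], "-", info["citation"])

# Walk the sections: 'content' holds the body as UTF-8 text,
# while 'raw_html' preserves the original markup.
for section in law["content"]:
    print(section["id"], section["section"])
```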
hordruma/ontario_laws_and_regs
[ "task_categories:text-generation", "task_categories:fill-mask", "task_categories:text-retrieval", "task_ids:language-modeling", "task_ids:masked-language-modeling", "task_ids:document-retrieval", "size_categories:1K<n<10K", "source_datasets:Ontario eLaws, Legislation & Regulations", "language:en", "license:mit", "law", "legal", "canada", "ontario", "legislation", "regulations", "region:us" ]
2023-12-22T15:04:58+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "source_datasets": ["Ontario eLaws, Legislation & Regulations"], "task_categories": ["text-generation", "fill-mask", "text-retrieval"], "task_ids": ["language-modeling", "masked-language-modeling", "document-retrieval"], "pretty_name": "Ontario Law and Regulations", "tags": ["law", "legal", "canada", "ontario", "legislation", "regulations"], "language_details": "en-CA, en-GB", "viewer": true}
2023-12-23T16:09:29+00:00
[]
[ "en" ]
e3e2eb3d73297e7cade85f23662ffd24ab534b0e
# Dataset Card for "arc_both" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ibragim-bad/arc_both
[ "region:us" ]
2023-12-22T15:05:00+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "choices", "sequence": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "string"}]}, {"name": "answerKey", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 968760, "num_examples": 3370}, {"name": "validation", "num_bytes": 254054, "num_examples": 869}, {"name": "test", "num_bytes": 1033025, "num_examples": 3548}], "download_size": 1193802, "dataset_size": 2255839}}
2023-12-22T15:05:23+00:00
[]
[]
b953d2258d3cd859a7da6145daa88792fea4a676
# Dataset Card for Alzheimer Classifier Demo

This dataset card covers 3 raw and 3 processed MRIs that are used by the Alzheimer Classifier Demo.

## Dataset Details

### Dataset Description

- **Curated by:** Rootstrap
- **License:** MIT

### Dataset Sources

- **Repository:** https://adni.loni.usc.edu/

## Uses

The dataset is used for the demo.

### Direct Use

This dataset is intended to be used only for the demo.

### Out-of-Scope Use

This dataset was not used for training the model and cannot be used for training a new model, as it is very limited. In addition, these MRIs are sensitive, so their use has to be approved by the Alzheimer’s Disease Neuroimaging Initiative (ADNI).

## Dataset Structure

The dataset consists of 3 MRIs, one for each classification class. Each of these MRIs can be seen in its raw state and in its processed state, for a total of 6 MRIs divided into raw and processed folders, each containing 3 MRIs.

## Dataset Creation

### Curation Rationale

This dataset was created for the demo.

### Source Data

These MRIs were obtained from the Alzheimer’s Disease Neuroimaging Initiative (ADNI).

#### Data Collection and Processing

1,614 MRIs were used to create the model. From this original dataset, 1 MRI from each class was gathered.

#### Who are the source data producers?

These MRIs were created by different researchers who collaborate with the Alzheimer’s Disease Neuroimaging Initiative (ADNI).

## Bias, Risks, and Limitations

As stated above, this dataset is only meant to be used for the demo and cannot be reproduced in any way.

### Recommendations

Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.

## Dataset Card Authors

Rootstrap

## Dataset Card Contact

[email protected]
rootstrap-org/Alzheimer-Classifier-Demo
[ "task_categories:image-classification", "size_categories:n<1K", "language:en", "license:mit", "medical", "region:us" ]
2023-12-22T15:45:30+00:00
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["image-classification"], "tags": ["medical"]}
2024-01-23T17:49:30+00:00
[]
[ "en" ]
b393eaf7db077d3f14a8e1e642fe01d24c695b06
# Enriched Topical-Chat: A Dialogue Act and Knowledge Sentence annotated version of Topical-Chat

This README describes Enriched Topical-Chat, an augmentation of Topical-Chat that contains dialogue act and knowledge sentence annotations for each turn in the dataset. Each annotation is produced automatically using off-the-shelf models.

## Knowledge Sentence Annotations

Each conversation in Topical-Chat has a pair of reading sets, each consisting of a set of knowledge sentences. For every turn and knowledge sentence in the Topical-Chat dataset, we computed a TFIDF vector. We then computed the cosine similarity between each turn in the conversation and each knowledge sentence, and selected the top-1 knowledge sentence. In the new data release, for each conversation turn, we will present the knowledge sentences selected along with the similarity score.

## Dialogue Act Annotations

We obtain the dialogue acts for each turn by running an off-the-shelf SVM dialogue act tagger released at https://github.com/ColingPaper2018/DialogueAct-Tagger. This tagger was trained on five datasets (Switchboard, Oasis BT, Maptask, VerbMobil2, AMI).

## Prerequisites

After cloning the repo you must first follow the instructions in https://github.com/alexa/Topical-Chat/blob/master/README.md. This includes building the original dataset along with the reading sets. Look at the Build Section for the exact steps. Once the original dataset is built, each .json file will contain pointers to the knowledge sentences in the reading sets.

### Conversations:
The data is hosted on S3. To pull the data, run these commands:
```
wget https://enriched-topical-chat.s3.amazonaws.com/train.json
wget https://enriched-topical-chat.s3.amazonaws.com/valid_freq.json
wget https://enriched-topical-chat.s3.amazonaws.com/valid_rare.json
wget https://enriched-topical-chat.s3.amazonaws.com/test_freq.json
wget https://enriched-topical-chat.s3.amazonaws.com/test_rare.json
```

Each .json file has the following format:
```
{
  "t_d004c097-424d-45d4-8f91-833d85c2da31": {
    "article_url": "<link to washington post article>",
    "config": "C",
    "content": [
      {
        "message": ["Did you know that the University of Iowa's locker room is painted pink?", "I wonder why?"],
        "agent": "agent_1",
        "segmented_annotations": [
          {
            "da": "PropQ",
            "gt_ks": {"score": 0.73, "ds": "wiki", "section": "FS1", "start_index": 0, "end_index": 100},
          },
          {
            "da": "ChoiceQ",
            "gt_ks": {"score": 0.0, "ds": "article", "section": "AS4", "start_index": 0, "end_index": 100},
          }
        ],
        "gt_turn_ks": {"score": 0.67, "ds": "fun_facts", "section": "FS1", "index": 0}
      },
```

The additional fields are:
```
message: a list containing the segments of each turn
segmented_annotations: a list of annotations, one for each segment within a turn
  da: ground truth dialogue act associated with the segmented response
  gt_ks: ground truth knowledge sentence associated with the segmented response
    ds: data source the knowledge was retrieved from: wiki, fun_facts or article
    fun_facts:
      section: which section contains the fun facts, i.e. FS1
      index: which element in the list of fun facts
    wiki:
      section: which section contains the wikipedia sentence, i.e. FS2
      start_index: index of beginning character of sentence in article
      end_index: index of end character
    article:
      section: which section of the article, i.e. AS4
      start_index: index of beginning character of sentence in article
      end_index: index of end character
gt_turn_ks: ground truth knowledge sentence associated with the turn
```

## Citation

### Enriched Topical-Chat
```
@article{hedayatnia2020policy,
  title={Policy-Driven Neural Response Generation for Knowledge-Grounded Dialogue Systems},
  author={Hedayatnia, Behnam and Gopalakrishnan, Karthik and Kim, Seokhwan and Liu, Yang and Eric, Mihail and Hakkani-Tur, Dilek},
  journal={arXiv preprint arXiv:2005.12529},
  year={2020}
}
```

### Topical-Chat
```
@inproceedings{gopalakrishnan2019topical,
  author={Gopalakrishnan, Karthik and Hedayatnia, Behnam and Chen, Qinlang and Gottardi, Anna and Kwatra, Sanjeev and Venkatesh, Anu and Gabriel, Raefer and Hakkani-Tür, Dilek},
  title={{Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations}},
  year={2019},
  booktitle={INTERSPEECH}
}
```
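To make the knowledge-selection procedure above concrete, here is a minimal sketch of TFIDF-based top-1 knowledge sentence selection. It assumes scikit-learn is available; the toy turn and knowledge sentences are illustrative, not taken from the released data.

```python
# Minimal sketch of the TFIDF + cosine-similarity selection described above.
# Assumes scikit-learn; the example turn and knowledge sentences are made up.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

knowledge_sentences = [
    "The University of Iowa's visiting locker room is painted pink.",
    "Pink was reportedly chosen to have a calming effect on opponents.",
]
turn = "Did you know that the University of Iowa's locker room is painted pink?"

# Fit one vectorizer over the turn and knowledge so they share a vocabulary
vectorizer = TfidfVectorizer()
vectorizer.fit(knowledge_sentences + [turn])

turn_vec = vectorizer.transform([turn])
knowledge_vecs = vectorizer.transform(knowledge_sentences)

# Cosine similarity between the turn and every knowledge sentence
scores = cosine_similarity(turn_vec, knowledge_vecs)[0]
best = scores.argmax()
print(best, round(float(scores[best]), 2), knowledge_sentences[best])
```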
Conversational-Reasoning/Topical-Chat-Enriched
[ "region:us" ]
2023-12-22T15:52:39+00:00
{}
2023-12-22T16:41:14+00:00
[]
[]
TAGS #region-us
# Enriched Topical-Chat: A Dialogue Act and Knowledge Sentence annotated version of Topical-Chat This README describes Enriched Topical-Chat, an augmentation of Topical-Chat that contains dialogue act and knowledge sentence annotations for each turn in the dataset. Each annotation is produced automatically using off-the-shelf models. ## Knowledge Sentence Annotations Each conversation in Topical-Chat has a pair of reading sets, each consisting of a set of knowledge sentences. For every turn and knowledge sentence in the Topical-Chat dataset, we computed a TFIDF vector. We then computed the cosine similarity between each turn in the conversation and each knowledge sentence, and selected the top-1 knowledge sentence. In the new data release, for each conversation turn, we will present the knowledge sentences selected along with the similarity score. ## Dialogue Act Annotations We obtain the dialogue acts for each turn by running an off-the-shelf SVM dialogue act tagger released at URL. This tagger was trained on five datasets (Switchboard, Oasis BT, Maptask, VerbMobil2, AMI). ## Prerequisites After cloning the repo you must first follow the instructions in URL This includes building the original dataset along with the reading sets. Look at the Build Section for the exact steps. Once the original dataset is built, each .json file will contain pointers to the knowledge sentences in the reading sets. ### Conversations: The data is hosted on S3. To pull the data, run these commands: Each .json file has the following format: ### Enriched Topical-Chat ### Topical-Chat
[ "# Enriched Topical-Chat: A Dialogue Act and Knowledge Sentence annotated version of Topical-Chat\n\nThis README describes Enriched Topical-Chat, an augmentation of Topical-Chat that contains dialogue act and knowledge sentence annotations for each turn in the dataset. Each annotation is automatically annotated using off-the-shelf models.", "## Knowledge Sentence Annoations\n\nEach conversation in Topical-Chat has a pair of reading sets which consists of a set of knowledge sentences. For every turn and knowledge sentence in the Topical-Chat dataset, we computed a TFIDF vector. We then computed the cosine similarity between a turn in the conversation and selected the top-1 knowledge sentence. In the new data release, for each conversation turn, we will present the knowledge sentences selected along with the similarity score.", "## Dialogue Act Annoations\n\nWe obtain the dialogue acts for each turn by running an off-the-shelf SVM dialogue act tagger released by (URL \nThis tagger was trained on five datasets (Switchboard, Oasis BT, Maptask, VerbMobil2, AMI).", "## Prerequisites\n\nAfter cloning the repo you must first follow the instructions in URL This includes building the original dataset along with the reading sets. Look at the Build Section for the exact steps. Once the original dataset is built each .json file will contain pointers to the knowledge sentences in the reading sets.", "### Conversations:\nThe data is hosted on s3. To pull the data run these commands: \n\n\nEach .json file has the specified format:", "### Enriched Topical-Chat", "### Topical-Chat" ]
[ "TAGS\n#region-us \n", "# Enriched Topical-Chat: A Dialogue Act and Knowledge Sentence annotated version of Topical-Chat\n\nThis README describes Enriched Topical-Chat, an augmentation of Topical-Chat that contains dialogue act and knowledge sentence annotations for each turn in the dataset. Each annotation is automatically annotated using off-the-shelf models.", "## Knowledge Sentence Annoations\n\nEach conversation in Topical-Chat has a pair of reading sets which consists of a set of knowledge sentences. For every turn and knowledge sentence in the Topical-Chat dataset, we computed a TFIDF vector. We then computed the cosine similarity between a turn in the conversation and selected the top-1 knowledge sentence. In the new data release, for each conversation turn, we will present the knowledge sentences selected along with the similarity score.", "## Dialogue Act Annoations\n\nWe obtain the dialogue acts for each turn by running an off-the-shelf SVM dialogue act tagger released by (URL \nThis tagger was trained on five datasets (Switchboard, Oasis BT, Maptask, VerbMobil2, AMI).", "## Prerequisites\n\nAfter cloning the repo you must first follow the instructions in URL This includes building the original dataset along with the reading sets. Look at the Build Section for the exact steps. Once the original dataset is built each .json file will contain pointers to the knowledge sentences in the reading sets.", "### Conversations:\nThe data is hosted on s3. To pull the data run these commands: \n\n\nEach .json file has the specified format:", "### Enriched Topical-Chat", "### Topical-Chat" ]
[ 6, 83, 107, 66, 71, 36, 9, 6 ]
[ "passage: TAGS\n#region-us \n# Enriched Topical-Chat: A Dialogue Act and Knowledge Sentence annotated version of Topical-Chat\n\nThis README describes Enriched Topical-Chat, an augmentation of Topical-Chat that contains dialogue act and knowledge sentence annotations for each turn in the dataset. Each annotation is automatically annotated using off-the-shelf models.## Knowledge Sentence Annoations\n\nEach conversation in Topical-Chat has a pair of reading sets which consists of a set of knowledge sentences. For every turn and knowledge sentence in the Topical-Chat dataset, we computed a TFIDF vector. We then computed the cosine similarity between a turn in the conversation and selected the top-1 knowledge sentence. In the new data release, for each conversation turn, we will present the knowledge sentences selected along with the similarity score.## Dialogue Act Annoations\n\nWe obtain the dialogue acts for each turn by running an off-the-shelf SVM dialogue act tagger released by (URL \nThis tagger was trained on five datasets (Switchboard, Oasis BT, Maptask, VerbMobil2, AMI).## Prerequisites\n\nAfter cloning the repo you must first follow the instructions in URL This includes building the original dataset along with the reading sets. Look at the Build Section for the exact steps. Once the original dataset is built each .json file will contain pointers to the knowledge sentences in the reading sets.### Conversations:\nThe data is hosted on s3. To pull the data run these commands: \n\n\nEach .json file has the specified format:### Enriched Topical-Chat### Topical-Chat" ]
b21732f3c6ac1269a26272502e22ecd75b434782
# Dataset Card for "livecodebench-execute-v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minimario/livecodebench-execute-v3
[ "region:us" ]
2023-12-22T16:33:56+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "function_name", "dtype": "string"}, {"name": "code", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "numsteps", "dtype": "int64"}, {"name": "problem_id", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 194877, "num_examples": 479}], "download_size": 67676, "dataset_size": 194877}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-12-22T16:53:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "livecodebench-execute-v3" More Information needed
[ "# Dataset Card for \"livecodebench-execute-v3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"livecodebench-execute-v3\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"livecodebench-execute-v3\"\n\nMore Information needed" ]
e1eba2a7470cb41252ffda92e64eb88d984ef78a
# Topical-Chat ASR: An ASR-augmented version of Topical-Chat

This README describes Topical-Chat ASR, an augmentation of Topical-Chat with non-trivial synthetic and actual ASR hypotheses.

## Synthetic: `/TopicalChatASR/synthetic`

For each file in the original Topical-Chat dataset, non-trivial synthetic ASR hypotheses are constructed at four different corpus-level **target** Word Error Rates (WER). We used an ASR error simulator method based on an n-gram confusion matrix and trained the simulator on transcribed ASR output from an internal user study. For a detailed description of the adopted simulation method, see: [Investigation of Error Simulation Techniques for Learning Dialog Policies for Conversational Error Recovery](https://arxiv.org/abs/1911.03378).

The target WERs chosen for simulation were 0.1, 0.15, 0.2 and 0.3. For each target WER, the errors were simulated with a single random seed for train and five random seeds for the validation and test splits. Thus, **for each target WER**, there are:
1. 1 file for training
2. 5 files for `valid_freq`, 5 files for `valid_rare`
3. 5 files for `test_freq`, 5 files for `test_rare`

For each turn in each dialog, the `"message"` key contains the written-text message from the original Topical-Chat dataset, and the `"message_sim_asr"` key contains the corresponding error-simulated message.

### NOTES
- The error simulator was not specifically optimized/trained to simulate errors for open-domain dialog.
- The no-punctuation synthetic setting (NO-PUNC) from the paper is easy to enable using a regular expression (sample [here](https://github.com/facebookresearch/ParlAI/blob/1a10dd650662a787788d691eb4e0b7ed6233f88d/parlai/core/metrics.py#L59)), so no data is provided for this setting.

## Actual:

For a small subset of the original Topical-Chat test sets, actual ASR errors were introduced. These errors are particularly important and helpful in studying multiple types of problems in open-domain dialog: entity recognition and linking, neural response generation, next utterance classification, etc. We hope these smaller, speech-based analogues of the original Topical-Chat test sets, titled `test_freq_audio` and `test_rare_audio`, serve as future benchmarks for the speech-robustness of open-domain dialog models.

From each of the original test sets, 40 uniquely representative dialogs were picked, and English-speaking human subjects of various ethnicities were asked to verbally read the dialogs with their own audio setup and record their audio, resulting in phonetically rich test sets.

Two automated transcription systems (A and B) were independently used to transcribe the collected audio, and each dialog transcription was aligned with the text of the original dialog based on edit distance, followed by manual re-alignment to obtain the turn-level transcriptions. The transcriptions by systems A and B are in the `"message_asr_A"` and `"message_asr_B"` keys respectively.

### NOTES
- Neither system A nor B was specifically optimized for the use-case of transcribing open-domain dialog. Nor were they optimized to transcribe a verbal reading of written-text dialogs.
- The WERs computed are higher than true ASR WERs because:
  - the ASR transcripts are not being compared against actual human transcriptions of the audio; rather, they are being compared against the original written-text messages that were supposed to be verbally read.
  - normalization of the ASR outputs against the original written-text was not performed, for example: `that's` vs. `that is`, `superpower` vs. `super power`, `222-0` vs. `222 to 0`.

## Citation

If you use this dataset, please cite the following two papers:

### Topical-Chat ASR
```
@inproceedings{gopalakrishnan2020speechrobust,
  author={Gopalakrishnan, Karthik and Hedayatnia, Behnam and Wang, Longshaokan and Liu, Yang and Hakkani-Tür, Dilek},
  title={{Are Neural Open-Domain Dialog Systems Robust to Speech Recognition Errors in the Dialog History? An Empirical Study}},
  year={2020},
  booktitle={INTERSPEECH}
}
```

### Topical-Chat
```
@inproceedings{gopalakrishnan2019topical,
  author={Gopalakrishnan, Karthik and Hedayatnia, Behnam and Chen, Qinlang and Gottardi, Anna and Kwatra, Sanjeev and Venkatesh, Anu and Gabriel, Raefer and Hakkani-Tür, Dilek},
  title={{Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations}},
  year={2019},
  booktitle={INTERSPEECH}
}
```
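As a rough illustration of the NO-PUNC setting mentioned in the notes above, the sketch below lowercases a message and strips punctuation with a regular expression. It approximates, rather than reproduces, the linked ParlAI normalization sample.

```python
import re

# Rough approximation of a no-punctuation normalization: lowercase the
# message and drop everything except word characters and whitespace.
def no_punc(message: str) -> str:
    return re.sub(r"[^\w\s]", "", message.lower()).strip()

print(no_punc("Did you know that it's painted pink?"))
# -> "did you know that its painted pink"
```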
Conversational-Reasoning/Topical-ChatASR
[ "task_categories:conversational", "task_categories:text-classification", "task_categories:feature-extraction", "size_categories:100K<n<1M", "language:en", "license:cdla-sharing-1.0", "arxiv:1911.03378", "region:us" ]
2023-12-22T16:54:06+00:00
{"language": ["en"], "license": "cdla-sharing-1.0", "size_categories": ["100K<n<1M"], "task_categories": ["conversational", "text-classification", "feature-extraction"], "pretty_name": "Topical-Chat"}
2023-12-22T16:54:16+00:00
[ "1911.03378" ]
[ "en" ]
TAGS #task_categories-conversational #task_categories-text-classification #task_categories-feature-extraction #size_categories-100K<n<1M #language-English #license-cdla-sharing-1.0 #arxiv-1911.03378 #region-us
# Topical-Chat ASR: An ASR-augmented version of Topical-Chat This README describes Topical-Chat ASR, an augmentation of Topical-Chat with non-trivial synthetic and actual ASR hypotheses. ## Synthetic: '/TopicalChatASR/synthetic' For each file in the original Topical-Chat dataset, non-trivial synthetic ASR hypotheses are constructed at four different corpus-level target Word Error Rates (WER). We used the ASR error simulator method based on n-gram confusion matrix and trained the simulator on transcribed ASR output from an internal user study. For a detailed description of the adopted simulation method, see: Investigation of Error Simulation Techniques for Learning Dialog Policies for Conversational Error Recovery. The target WERs chosen for simulation were 0.1, 0.15, 0.2 and 0.3. For each target WER, the errors were simulated with a single random seed for train and five random seeds for validation and test splits. Thus, for each target WER, there are: 1. 1 file for training 2. 5 files for 'valid_freq', 5 files for 'valid_rare' 3. 5 files for 'test_freq', 5 files for 'test_rare' For each turn in each dialog, the '"message"' key contains the written-text message from the original Topical-Chat dataset, and the '"message_sim_asr"' key contains the corresponding error-simulated message. ### NOTES - The error simulator was not specifically optimized/trained to simulate errors for open-domain dialog. - The no-punctuation synthetic setting (NO-PUNC) from the paper is easy to enable using a regular expression (sample here), so no data is provided for this setting. ## Actual: For a small subset of the original Topical-Chat test sets, actual ASR errors were introduced. These errors are particularly important and helpful in studying multiple types of problems in open-domain dialog: entity recognition and linking, neural response generation, next utterance classification, etc. We hope these smaller, speech-based analogues of the original Topical-Chat test sets, titled 'test_freq_audio' and 'test_rare_audio', serve as future benchmarks for speech-robustness of open-domain dialog models. From each of the original test sets, 40 uniquely representative dialogs were picked and English-speaking human subjects of various ethnicities were asked to verbally read the dialogs with their own audio setup and record their audio, resulting in phonetically rich test sets. Two automated transcription systems (A and B) were independently used to transcribe the collected audio, and each dialog transcription was aligned with the text of the original dialog based on edit distance followed by manual re-alignment to obtain the turn-level transcriptions. The transcriptions by systems A and B are in the '"message_asr_A"' and '"message_asr_B"' keys respectively. ### NOTES - Neither systems A nor B were specifically optimized for the use-case of transcribing open-domain dialog. Nor were they optimized to transcribe a verbal reading of written-text dialogs. - The WERs computed are higher than true ASR WERs because: - the ASR transcripts are not being compared against actual human transcriptions of the audio, rather, they are being compared against the original written-text messages that were supposed to be verbally read. - normalization of the ASR outputs against the original written-text was not performed, for example: 'that's' vs. 'that is', 'superpower' vs. 'super power', '222-0' vs. '222 to 0'. If you use this dataset, please cite the following two papers: ### Topical-Chat ASR ### Topical-Chat
[ "# Topical-Chat ASR: An ASR-augmented version of Topical-Chat\n\nThis README describes Topical-Chat ASR, an augmentation of Topical-Chat with non-trivial synthetic and actual ASR hypotheses.", "## Synthetic: '/TopicalChatASR/synthetic'\nFor each file in the original Topical-Chat dataset, non-trivial synthetic ASR hypotheses are constructed at four different corpus-level target Word Error Rates (WER). We used the ASR error simulator method based on n-gram confusion matrix and trained the simulator on transcribed ASR output from an internal user study. For a detailed description of the adopted simulation method, see: Investigation of Error Simulation Techniques for Learning Dialog Policies for Conversational Error Recovery.\n\nThe target WERs chosen for simulation were 0.1, 0.15, 0.2 and 0.3. For each target WER, the errors were simulated with a single random seed for train and five random seeds for validation and test splits. Thus, for each target WER, there are:\n1. 1 file for training\n2. 5 files for 'valid_freq', 5 files for 'valid_rare'\n3. 5 files for 'test_freq', 5 files for 'test_rare'\n\nFor each turn in each dialog, the '\"message\"' key contains the written-text message from the original Topical-Chat dataset, and the '\"message_sim_asr\"' key contains the corresponding error-simulated message.", "### NOTES\n- The error simulator was not specifically optimized/trained to simulate errors for open-domain dialog.\n- The no-punctuation synthetic setting (NO-PUNC) from the paper is easy to enable using a regular expression (sample here), so no data is provided for this setting.", "## Actual:\nFor a small subset of the original Topical-Chat test sets, actual ASR errors were introduced. These errors are particularly important and helpful in studying multiple types of problems in open-domain dialog: entity recognition and linking, neural response generation, next utterance classification, etc. We hope these smaller, speech-based analogues of the original Topical-Chat test sets, titled 'test_freq_audio' and 'test_rare_audio', serve as future benchmarks for speech-robustness of open-domain dialog models.\n\nFrom each of the original test sets, 40 uniquely representative dialogs were picked and English-speaking human subjects of various ethnicities were asked to verbally read the dialogs with their own audio setup and record their audio, resulting in phonetically rich test sets.\n\nTwo automated transcription systems (A and B) were independently used to transcribe the collected audio, and each dialog transcription was aligned with the text of the original dialog based on edit distance followed by manual re-alignment to obtain the turn-level transcriptions. The transcriptions by systems A and B are in the '\"message_asr_A\"' and '\"message_asr_B\"' keys respectively.", "### NOTES\n- Neither systems A nor B were specifically optimized for the use-case of transcribing open-domain dialog. Nor were they optimized to transcribe a verbal reading of written-text dialogs.\n\n- The WERs computed are higher than true ASR WERs because:\n\n - the ASR transcripts are not being compared against actual human transcriptions of the audio, rather, they are being compared against the original written-text messages that were supposed to be verbally read.\n\n - normalization of the ASR outputs against the original written-text was not performed, for example: 'that's' vs. 'that is', 'superpower' vs. 'super power', '222-0' vs. 
'222 to 0'.\n\n\nIf you use this dataset, please cite the following two papers:", "### Topical-Chat ASR", "### Topical-Chat" ]
[ "TAGS\n#task_categories-conversational #task_categories-text-classification #task_categories-feature-extraction #size_categories-100K<n<1M #language-English #license-cdla-sharing-1.0 #arxiv-1911.03378 #region-us \n", "# Topical-Chat ASR: An ASR-augmented version of Topical-Chat\n\nThis README describes Topical-Chat ASR, an augmentation of Topical-Chat with non-trivial synthetic and actual ASR hypotheses.", "## Synthetic: '/TopicalChatASR/synthetic'\nFor each file in the original Topical-Chat dataset, non-trivial synthetic ASR hypotheses are constructed at four different corpus-level target Word Error Rates (WER). We used the ASR error simulator method based on n-gram confusion matrix and trained the simulator on transcribed ASR output from an internal user study. For a detailed description of the adopted simulation method, see: Investigation of Error Simulation Techniques for Learning Dialog Policies for Conversational Error Recovery.\n\nThe target WERs chosen for simulation were 0.1, 0.15, 0.2 and 0.3. For each target WER, the errors were simulated with a single random seed for train and five random seeds for validation and test splits. Thus, for each target WER, there are:\n1. 1 file for training\n2. 5 files for 'valid_freq', 5 files for 'valid_rare'\n3. 5 files for 'test_freq', 5 files for 'test_rare'\n\nFor each turn in each dialog, the '\"message\"' key contains the written-text message from the original Topical-Chat dataset, and the '\"message_sim_asr\"' key contains the corresponding error-simulated message.", "### NOTES\n- The error simulator was not specifically optimized/trained to simulate errors for open-domain dialog.\n- The no-punctuation synthetic setting (NO-PUNC) from the paper is easy to enable using a regular expression (sample here), so no data is provided for this setting.", "## Actual:\nFor a small subset of the original Topical-Chat test sets, actual ASR errors were introduced. These errors are particularly important and helpful in studying multiple types of problems in open-domain dialog: entity recognition and linking, neural response generation, next utterance classification, etc. We hope these smaller, speech-based analogues of the original Topical-Chat test sets, titled 'test_freq_audio' and 'test_rare_audio', serve as future benchmarks for speech-robustness of open-domain dialog models.\n\nFrom each of the original test sets, 40 uniquely representative dialogs were picked and English-speaking human subjects of various ethnicities were asked to verbally read the dialogs with their own audio setup and record their audio, resulting in phonetically rich test sets.\n\nTwo automated transcription systems (A and B) were independently used to transcribe the collected audio, and each dialog transcription was aligned with the text of the original dialog based on edit distance followed by manual re-alignment to obtain the turn-level transcriptions. The transcriptions by systems A and B are in the '\"message_asr_A\"' and '\"message_asr_B\"' keys respectively.", "### NOTES\n- Neither systems A nor B were specifically optimized for the use-case of transcribing open-domain dialog. 
Nor were they optimized to transcribe a verbal reading of written-text dialogs.\n\n- The WERs computed are higher than true ASR WERs because:\n\n - the ASR transcripts are not being compared against actual human transcriptions of the audio, rather, they are being compared against the original written-text messages that were supposed to be verbally read.\n\n - normalization of the ASR outputs against the original written-text was not performed, for example: 'that's' vs. 'that is', 'superpower' vs. 'super power', '222-0' vs. '222 to 0'.\n\n\nIf you use this dataset, please cite the following two papers:", "### Topical-Chat ASR", "### Topical-Chat" ]
[ 75, 57, 298, 72, 295, 183, 8, 6 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-classification #task_categories-feature-extraction #size_categories-100K<n<1M #language-English #license-cdla-sharing-1.0 #arxiv-1911.03378 #region-us \n# Topical-Chat ASR: An ASR-augmented version of Topical-Chat\n\nThis README describes Topical-Chat ASR, an augmentation of Topical-Chat with non-trivial synthetic and actual ASR hypotheses.## Synthetic: '/TopicalChatASR/synthetic'\nFor each file in the original Topical-Chat dataset, non-trivial synthetic ASR hypotheses are constructed at four different corpus-level target Word Error Rates (WER). We used the ASR error simulator method based on n-gram confusion matrix and trained the simulator on transcribed ASR output from an internal user study. For a detailed description of the adopted simulation method, see: Investigation of Error Simulation Techniques for Learning Dialog Policies for Conversational Error Recovery.\n\nThe target WERs chosen for simulation were 0.1, 0.15, 0.2 and 0.3. For each target WER, the errors were simulated with a single random seed for train and five random seeds for validation and test splits. Thus, for each target WER, there are:\n1. 1 file for training\n2. 5 files for 'valid_freq', 5 files for 'valid_rare'\n3. 5 files for 'test_freq', 5 files for 'test_rare'\n\nFor each turn in each dialog, the '\"message\"' key contains the written-text message from the original Topical-Chat dataset, and the '\"message_sim_asr\"' key contains the corresponding error-simulated message.### NOTES\n- The error simulator was not specifically optimized/trained to simulate errors for open-domain dialog.\n- The no-punctuation synthetic setting (NO-PUNC) from the paper is easy to enable using a regular expression (sample here), so no data is provided for this setting." ]
f31ce6442f5f844e9b9d439a76c5dbb6af284ec7
# 🚀 GSM8K training set

The original answer format is "\n#### Value" and is now "\n#### Value\nThe answer is: Value", and the answer is cleaned, which is **consistent with the answer format of "meta-math/MetaMathQA"**.

## 💻 Dataset Usage
Run the following command to load the data:
```python
from datasets import load_dataset

dataset = load_dataset("shuyuej/metamath_gsm8k")
dataset = dataset['train']
print(dataset)
```

# 📝 Dataset modification codes
```python
# coding=utf-8
import re

import jsonlines
from datasets import load_dataset, Features, Value


def clean_up(sentence):
    # Find all the locations of "<<"
    matches = [match.start() for match in re.finditer(r'<<', sentence)]
    for match in matches:
        # Get the left 20 characters of each "<<"
        left_chars = sentence[match - 20:match]
        # Replace "x" or "X" with "*" if they appear in those 20 characters
        modified_chars = left_chars.replace('x', '*').replace('X', '*')
        if 'x' in left_chars or 'X' in left_chars:
            sentence = sentence.replace(left_chars, modified_chars)

    # Remove the calculator annotations, i.e. any text between "<<" and ">>"
    sentence = re.sub(r"<<(.*?)>>", "", sentence)

    # Surround the operators "*", "+", "-" and "=" with single spaces;
    # "/" is deliberately left untouched
    for op in ('*', '+', '-', '='):
        locations = [i for i, char in enumerate(sentence) if char == op]
        for index in reversed(locations):
            if 0 < index < len(sentence) - 1 and sentence[index - 1] != ' ' and sentence[index + 1] != ' ':
                sentence = sentence[:index] + ' ' + sentence[index] + ' ' + sentence[index + 1:]
            elif 0 < index < len(sentence) - 1 and sentence[index - 1] != ' ' and sentence[index + 1] == ' ':
                sentence = sentence[:index] + ' ' + sentence[index] + sentence[index + 1:]
            elif 0 < index < len(sentence) - 1 and sentence[index - 1] == ' ' and sentence[index + 1] != ' ':
                sentence = sentence[:index] + sentence[index] + ' ' + sentence[index + 1:]

    # Prepend a "0" to any "." that has a space on its left and a digit on its right
    dots_locations = [match.start() for match in re.finditer(r'\.', sentence)]
    for dot_location in reversed(dots_locations):
        if sentence[dot_location - 1].isspace() and sentence[dot_location + 1].isdigit():
            sentence = sentence[:dot_location] + '0' + sentence[dot_location:]

    # Check if there is a "." before "\n#### "; if not, add one
    if ".\n#### " not in sentence:
        sentence = sentence.replace("\n#### ", ".\n#### ")

    return sentence


# Load the original GSM8K training split
context_feat = Features({"question": Value(dtype='string', id=None),
                         "answer": Value(dtype='string', id=None)})
train_set = load_dataset('json', data_files='train.jsonl', split='train', features=context_feat)

data = []
for example in train_set:
    number = example['answer'].split('#### ')[1]
    number = int(number.replace(',', ''))
    append = "\nThe answer is: " + str(number)
    answer = example['answer'] + append
    answer = clean_up(sentence=answer)
    question = example['question']
    data.append({"question": question, "answer": answer})

# Save the modified data to a jsonl file
output_file = 'gsm8k_train.jsonl'
with jsonlines.open(output_file, 'w') as writer:
    writer.write_all(data)
print(f"Modified data saved to {output_file}")
```
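For intuition, here is a hypothetical before/after pair for the pipeline above (the answer text is invented for illustration, not taken from GSM8K):

```python
# Hypothetical GSM8K-style answer (invented for illustration)
raw_answer = "He bought 3 boxes at $4 each, so 3x4=<<3*4=12>>12 dollars\n#### 12"

# After appending "\nThe answer is: 12" and running clean_up, the "x" near
# "<<" becomes "*", the calculator annotation is stripped, the operators are
# spaced, and a period is added before "#### ", yielding:
# "He bought 3 boxes at $4 each, so 3 * 4 = 12 dollars.\n#### 12\nThe answer is: 12"
```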
shuyuej/metamath_gsm8k
[ "license:apache-2.0", "region:us" ]
2023-12-22T16:56:33+00:00
{"license": "apache-2.0"}
2024-01-25T19:44:59+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
# GSM8K training set The original answer format is "\n#### Value" and is now "\n#### Value\nThe answer is: Value", and the answer is cleaned, which is consistent with the answer format of "meta-math/MetaMathQA". ## Dataset Usage Run the following command to load the data: # Dataset modification codes
[ "# GSM8K training set\nThe original answer is \"\\n#### Value\" and now is \"\\n#### Value\\nThe answer is: Value\", and the answer is cleaned, which is consistent with the answer format with \"meta-math/MetaMathQA\".", "## Dataset Usage\nRun the following command to load the data:", "# Dataset modification codes" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# GSM8K training set\nThe original answer is \"\\n#### Value\" and now is \"\\n#### Value\\nThe answer is: Value\", and the answer is cleaned, which is consistent with the answer format with \"meta-math/MetaMathQA\".", "## Dataset Usage\nRun the following command to load the data:", "# Dataset modification codes" ]
[ 14, 59, 14, 6 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# GSM8K training set\nThe original answer is \"\\n#### Value\" and now is \"\\n#### Value\\nThe answer is: Value\", and the answer is cleaned, which is consistent with the answer format with \"meta-math/MetaMathQA\".## Dataset Usage\nRun the following command to load the data:# Dataset modification codes" ]
fee4a71dd8ce5b471dd46e1b7aab2acbd1b9e1be
# Topical-Chat

We introduce Topical-Chat, a knowledge-grounded human-human conversation dataset where the underlying knowledge spans 8 broad topics and conversation partners don’t have explicitly defined roles.

Topical-Chat broadly consists of two types of files:
- Conversations: JSON files containing conversations between pairs of Amazon Mechanical Turk workers.
- Reading Sets: JSON files containing knowledge sections rendered as reading content to the Turkers having conversations.

For detailed information about the dataset, modeling benchmarking experiments and evaluation results, please refer to our [paper](https://arxiv.org/abs/2308.11995).

## Dataset

### Statistics:

| Stat | Train | Valid Freq. | Valid Rare | Test Freq. | Test Rare | All |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| # of conversations | 8628 | 539 | 539 | 539 | 539 | 10784 |
| # of utterances | 188378 | 11681 | 11692 | 11760 | 11770 | 235281 |
| average # of turns per conversation | 21.8 | 21.6 | 21.7 | 21.8 | 21.8 | 21.8 |
| average length of utterance | 19.5 | 19.8 | 19.8 | 19.5 | 19.5 | 19.6 |

### Split:

The data is split into 5 distinct groups: *train*, *valid frequent*, *valid rare*, *test frequent* and *test rare*. The frequent set contains entities frequently seen in the training set. The rare set contains entities that were infrequently seen in the training set.

### Configuration Type:

For each conversation to be collected, we applied a random knowledge configuration from a pre-defined list of configurations, to construct a pair of reading sets to be rendered to the partnered Turkers. Configurations were defined to impose varying degrees of knowledge symmetry or asymmetry between partner Turkers, leading to the collection of a wide variety of conversations.

![Reading sets for Turkers 1 and 2 in Config A](images/configA.png)
![Reading sets for Turkers 1 and 2 in Config B](images/configB.png)
![Reading sets for Turkers 1 and 2 in Config C&D](images/configCD.png)

### Conversations:

**Each JSONL file in `conversations/` has the following format:**
```
{
  <conversation_id>: {
    “article_url”: <article url>,
    “config”: <config>, # one of A, B, C, D
    “content”: [ # ordered list of conversation turns
      {
        “agent”: “agent_1”, # or “agent_2”,
        “message”: <message text>,
        “sentiment”: <text>,
        “knowledge_source”: [“AS1”, “Personal Knowledge”, ...],
        “turn_rating”: “Poor”,
      },…
    ],
    “conversation_rating”: {
      “agent_1”: “Good”,
      “agent_2”: “Excellent”
    }
  },…
}
```
- conversation_id: A unique identifier for a conversation in Topical-Chat
- article_url: URL pointing to the Washington Post article associated with a conversation
- config: The knowledge configuration applied to obtain a pair of reading sets for a conversation
- content: An ordered list of conversation turns
  - agent: An identifier for the Turker who generated the message
  - message: The message generated by the agent
  - sentiment: Self-annotation of the sentiment of the message
  - knowledge_source: Self-annotation of the section within the agent's reading set used to generate this message
  - turn_rating: Partner-annotation of the quality of the message
- conversation_rating: Self-annotation of the quality of the conversation
  - agent_1: Rating of the conversation by Turker 1
  - agent_2: Rating of the conversation by Turker 2

### Reading Sets:

Each reading-set JSON file contains the following fields:
```
- conversation_id: A unique identifier for a conversation in Topical-Chat
- config: The knowledge configuration applied to obtain a pair of reading sets for a conversation
- agent_{1/2}: Contains the factual sections in this agent's reading set
  - FS{1/2/3}: Identifier for a factual section
    - entity: A real-world entity
    - shortened_wiki_lead_section: A shortened version of the Wikipedia lead section of the entity
    - summarized_wiki_lead_section: A (TextRank) summarized version of the Wikipedia lead section of the entity
    - fun_facts: Crowdsourced and manually curated fun facts about the entity from Reddit's r/todayilearned subreddit
- article: A Washington Post article common to both partners' reading sets
  - url: URL pointing to the Washington Post article associated with a conversation
  - headline: The headline of the Washington Post article
  - AS{1/2/3/4}: A chunk of the body of the Washington Post article
```

## Citation

If you use Topical-Chat in your work, please cite with the following:
```
@inproceedings{gopalakrishnan2019topical,
  author={Karthik Gopalakrishnan and Behnam Hedayatnia and Qinlang Chen and Anna Gottardi and Sanjeev Kwatra and Anu Venkatesh and Raefer Gabriel and Dilek Hakkani-Tür},
  title={{Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations}},
  year=2019,
  booktitle={Proc. Interspeech 2019},
  pages={1891--1895},
  doi={10.21437/Interspeech.2019-3079},
  url={http://dx.doi.org/10.21437/Interspeech.2019-3079}
}
```
```
Gopalakrishnan, Karthik, et al. "Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations.", Proc. INTERSPEECH 2019
```

## Acknowledgements

We thank Anju Khatri, Anjali Chadha and Mohammad Shami for their help with the public release of the dataset. We thank Jeff Nunn and Yi Pan for their early contributions to the dataset collection.
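To illustrate the conversation format above, here is a minimal sketch that walks one conversations file; the file name is illustrative and assumes the dataset has already been built per the repo instructions.

```python
import json

# Illustrative walk over a built Topical-Chat conversations file
# (the file name assumes the dataset was built per the repo instructions).
with open("train.json") as f:
    conversations = json.load(f)

for conv_id, conv in conversations.items():
    print(conv_id, conv["config"], conv["article_url"])
    for turn in conv["content"]:
        print(f'  {turn["agent"]}: {turn["message"]} '
              f'(sentiment={turn["sentiment"]}, rating={turn["turn_rating"]})')
    break  # just show the first conversation
```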
Conversational-Reasoning/Topical-Chat
[ "arxiv:2308.11995", "region:us" ]
2023-12-22T16:56:40+00:00
{}
2023-12-22T16:56:42+00:00
[ "2308.11995" ]
[]
TAGS #arxiv-2308.11995 #region-us
Topical-Chat ============ We introduce Topical-Chat, a knowledge-grounded human-human conversation dataset where the underlying knowledge spans 8 broad topics and conversation partners don’t have explicitly defined roles. Topical-Chat broadly consists of two types of files: * Conversations: JSON files containing conversations between pairs of Amazon Mechanical Turk workers. * Reading Sets: JSON files containing knowledge sections rendered as reading content to the Turkers having conversations. For detailed information about the dataset, modeling benchmarking experiments and evaluation results, please refer to our paper. Dataset ------- ### Statistics: ### Split: The data is split into 5 distinct groups: *train*, *valid frequent*, *valid rare*, *test frequent* and *test rare*. The frequent set contains entities frequently seen in the training set. The rare set contains entities that were infrequently seen in the training set. ### Configuration Type: For each conversation to be collected, we applied a random knowledge configuration from a pre-defined list of configurations, to construct a pair of reading sets to be rendered to the partnered Turkers. Configurations were defined to impose varying degrees of knowledge symmetry or asymmetry between partner Turkers, leading to the collection of a wide variety of conversations. !Reading sets for Turkers 1 and 2 in Config A !Reading sets for Turkers 1 and 2 in Config B !Reading sets for Turkers 1 and 2 in Config C&D ### Conversations: Each JSONL file in 'conversations/' has the following format: * conversation\_id: A unique identifier for a conversation in Topical-Chat * article\_url: URL pointing to the Washington Post article associated with a conversation * config: The knowledge configuration applied to obtain a pair of reading sets for a conversation * content: An ordered list of conversation turns + agent: An identifier for the Turker who generated the message + message: The message generated by the agent + sentiment: Self-annotation of the sentiment of the message + knowledge\_source: Self-annotation of the section within the agent's reading set used to generate this message + turn\_rating: Partner-annotation of the quality of the message * conversation\_rating: Self-annotation of the quality of the conversation + agent\_1: Rating of the conversation by Turker 1 + agent\_2: Rating of the conversation by Turker 2 If you use Topical-Chat in your work, please cite with the following: Acknowledgements ---------------- We thank Anju Khatri, Anjali Chadha and Mohammad Shami for their help with the public release of the dataset. We thank Jeff Nunn and Yi Pan for their early contributions to the dataset collection.
[ "### Statistics:", "### Split:\n\n\nThe data is split into 5 distinct groups: *train*, *valid frequent*,\n*valid rare*, *test frequent* and *test rare*. The frequent set\ncontains entities frequently seen in the training set. The rare set\ncontains entities that were infrequently seen in the training set.", "### Configuration Type:\n\n\nFor each conversation to be collected, we applied a random\nknowledge configuration from a pre-defined list of configurations,\nto construct a pair of reading sets to be rendered to the partnered\nTurkers. Configurations were defined to impose varying degrees of\nknowledge symmetry or asymmetry between partner Turkers, leading to\nthe collection of a wide variety of conversations.\n\n\n!Reading sets for Turkers 1 and 2 in Config A\n\n\n!Reading sets for Turkers 1 and 2 in Config B\n\n\n!Reading sets for Turkers 1 and 2 in Config C&D", "### Conversations:\n\n\nEach JSONL file in 'conversations/' has the following\nformat:\n\n\n* conversation\\_id: A unique identifier for a conversation in Topical-Chat\n* article\\_url: URL pointing to the Washington Post article associated\nwith a conversation\n* config: The knowledge configuration applied to obtain a pair of\nreading sets for a conversation\n* content: An ordered list of conversation turns\n\t+ agent: An identifier for the Turker who generated the message\n\t+ message: The message generated by the agent\n\t+ sentiment: Self-annotation of the sentiment of the message\n\t+ knowledge\\_source: Self-annotation of the section within\n\tthe agent's reading set used to generate this message\n\t+ turn\\_rating: Partner-annotation of the quality of the message\n* conversation\\_rating: Self-annotation of the quality of the conversation\n\t+ agent\\_1: Rating of the conversation by Turker 1\n\t+ agent\\_2: Rating of the conversation by Turker 2\n\n\nIf you use Topical-Chat in your work, please cite with the following:\n\n\nAcknowledgements\n----------------\n\n\nWe thank Anju Khatri, Anjali Chadha and\nMohammad Shami for their help with the public release of\nthe dataset. We thank Jeff Nunn and Yi Pan for their\nearly contributions to the dataset collection." ]
[ "TAGS\n#arxiv-2308.11995 #region-us \n", "### Statistics:", "### Split:\n\n\nThe data is split into 5 distinct groups: *train*, *valid frequent*,\n*valid rare*, *test frequent* and *test rare*. The frequent set\ncontains entities frequently seen in the training set. The rare set\ncontains entities that were infrequently seen in the training set.", "### Configuration Type:\n\n\nFor each conversation to be collected, we applied a random\nknowledge configuration from a pre-defined list of configurations,\nto construct a pair of reading sets to be rendered to the partnered\nTurkers. Configurations were defined to impose varying degrees of\nknowledge symmetry or asymmetry between partner Turkers, leading to\nthe collection of a wide variety of conversations.\n\n\n!Reading sets for Turkers 1 and 2 in Config A\n\n\n!Reading sets for Turkers 1 and 2 in Config B\n\n\n!Reading sets for Turkers 1 and 2 in Config C&D", "### Conversations:\n\n\nEach JSONL file in 'conversations/' has the following\nformat:\n\n\n* conversation\\_id: A unique identifier for a conversation in Topical-Chat\n* article\\_url: URL pointing to the Washington Post article associated\nwith a conversation\n* config: The knowledge configuration applied to obtain a pair of\nreading sets for a conversation\n* content: An ordered list of conversation turns\n\t+ agent: An identifier for the Turker who generated the message\n\t+ message: The message generated by the agent\n\t+ sentiment: Self-annotation of the sentiment of the message\n\t+ knowledge\\_source: Self-annotation of the section within\n\tthe agent's reading set used to generate this message\n\t+ turn\\_rating: Partner-annotation of the quality of the message\n* conversation\\_rating: Self-annotation of the quality of the conversation\n\t+ agent\\_1: Rating of the conversation by Turker 1\n\t+ agent\\_2: Rating of the conversation by Turker 2\n\n\nIf you use Topical-Chat in your work, please cite with the following:\n\n\nAcknowledgements\n----------------\n\n\nWe thank Anju Khatri, Anjali Chadha and\nMohammad Shami for their help with the public release of\nthe dataset. We thank Jeff Nunn and Yi Pan for their\nearly contributions to the dataset collection." ]
[ 14, 5, 74, 139, 282 ]
[ "passage: TAGS\n#arxiv-2308.11995 #region-us \n### Statistics:### Split:\n\n\nThe data is split into 5 distinct groups: *train*, *valid frequent*,\n*valid rare*, *test frequent* and *test rare*. The frequent set\ncontains entities frequently seen in the training set. The rare set\ncontains entities that were infrequently seen in the training set.### Configuration Type:\n\n\nFor each conversation to be collected, we applied a random\nknowledge configuration from a pre-defined list of configurations,\nto construct a pair of reading sets to be rendered to the partnered\nTurkers. Configurations were defined to impose varying degrees of\nknowledge symmetry or asymmetry between partner Turkers, leading to\nthe collection of a wide variety of conversations.\n\n\n!Reading sets for Turkers 1 and 2 in Config A\n\n\n!Reading sets for Turkers 1 and 2 in Config B\n\n\n!Reading sets for Turkers 1 and 2 in Config C&D" ]
8d414c517d9a24c50c1ad92af5de4e8f53171644
NeuroZoom Reviews - Legit **[Brain Health Support Formula](https://neuroozoom.com/)**? Ingredients, Benefits and Price!

NeuroZoom presents itself as an all-natural, safe-to-consume advanced formula designed to support healthy memory and concentration. Comprised of natural and effective components, this product claims to offer a wide range of benefits to cognitive functions. Dive into this concise NeuroZoom review to understand its potential advantages.

📣 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 🏬 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 📣 **[NeuroZoom Official Site](https://neuroozoom.com)**

Understanding NeuroZoom:

**[NeuroZoom](https://neuroozoom.com)** is promoted as a 35-in-1 memory essential formula aimed at supporting healthy memory, concentration, and mental acuity.

How NeuroZoom Operates:

This product is crafted to enhance memory and focus by addressing crucial elements that contribute to regular brain function. It contains carefully selected components to provide comprehensive assistance without the need for a prescription. NeuroZoom promotes synaptic transmission with neurotransmitters like dopamine, acetylcholine, serotonin, norepinephrine, GABA, and epinephrine. By optimizing neurotransmitter conditions, it enhances brain connectivity and provides the necessary chemical building blocks for the brain.

Key Ingredients Inside NeuroZoom:

NeuroZoom's ingredients are sourced from nature and aim to enhance mental performance. Some of the essential components include:

- Selenium: Supports mental health and regulates fundamental biological processes.
- Vitamin E: Known for its antioxidant properties, mitigating negative effects on the brain.
- Vitamin C: Reduces stress, anxiety, and depression symptoms, supporting the brain's defenses against damage.
- Riboflavin: Essential for energy production and maintaining healthy brain function.
- Niacin: Raises NAD+ levels, contributing to neuronal health and overall brain function.
- Vitamin B6: Reduces symptoms of depression, anxiety, and stress, enhancing cognitive functions.
- Thiamine: A potent antioxidant supporting amino acid production, blood flow, and brain function.
- Green Leaf Extract: Known for memory enhancement and mitigating age-related memory deterioration.
- Olive Leaf: Improves memory performance by regulating immune responses and stimulating CNS receptors.
- Chromium: Reduces brain fog, enhances memory, and aids in regaining focus.
- Biotin: Necessary for neurotransmitter production, promoting normal cognitive functions.

📣 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 🏬 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 📣 **[NeuroZoom Official Site](https://neuroozoom.com)**

How to Consume NeuroZoom:

NeuroZoom is available in capsule form, with each bottle containing 60 pills for a 30-day supply. The recommended dosage is two capsules daily, preferably with meals or as directed by a healthcare professional. To maximize benefits, it's suggested to use NeuroZoom consistently for 3 to 6 months.

NeuroZoom Benefits:

- 100% natural and effective formula.
- Real results within days.
- Supports brain restoration.
- Promotes a healthy and sharp memory.
- Repairs damage and enhances connectivity.
- Non-GMO, vegan, and gluten-free.
- Delivers rapid and incredible results.

Pricing & Discounts:

NeuroZoom offers exclusive discounts on its official website:

- Single Bottle: $79 + $19.95 for shipping.
- Three Bottles: $177 ($59 per bottle) + free shipping and two Ebook Bonuses.
- Six Bottles: $294 ($49 per bottle) + free shipping and two Ebook Bonuses.

A 100% money-back guarantee is provided within 60 days of purchase, allowing for a risk-free trial.

📣 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 🏬 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 📣 **[NeuroZoom Official Site](https://neuroozoom.com)**

Final Thoughts:

In conclusion, NeuroZoom emerges as a promising cognitive health solution with a patented formula of rare natural ingredients. With the potential to enhance memory and overall well-being, this product offers real results within a short period. It comes with a full 100% money-back guarantee, making it a risk-free investment. Don't miss the chance to improve your memory: get your NeuroZoom today!

FAQs:

How About NeuroZoom’s Additional Bonuses?
For today only, order 6 bottles of NeuroZoom and receive 2 exclusive digital bonuses:
- Supercharge Your Body
- Biohacking Secrets

Does NeuroZoom Offer Any Side Effects?
NeuroZoom is free from harmful chemicals, artificial elements, and synthetic fillers. No side effects have been reported, making it a safe supplement for enhancing intelligence, energy, strength, and concentration.

Are Added Ingredients Safe and Natural?
Absolutely! NeuroZoom is crafted from the best plants and elements from the highest and purest places. Every ingredient is 100% natural, effective, and safe, carefully selected and added in perfect quantities to work synergistically.

What If NeuroZoom Doesn’t Work For Me?
A 60-Day Money-Back Guarantee protects you. If NeuroZoom doesn’t work for you, a complete refund can be requested by sending the empty bottles within 60 days of purchase.

Is NeuroZoom FDA Approved?
While the FDA does not certify dietary supplement products, NeuroZoom is manufactured in an FDA-registered facility following GMP guidelines, ensuring its quality. Manufactured in the USA, NeuroZoom maintains high standards.

Disclaimer: The views and opinions expressed in this sponsored article are those of the sponsor/author/agency and do not represent the stand and views of Mid-Day Group. Mid-Day Group disclaims any and all liability to any party, company, or product for any direct, indirect, implied, punitive, special, incidental, or consequential damages arising directly or indirectly from the use of this content.

📣 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 🏬 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 📣 **[NeuroZoom Official Site](https://neuroozoom.com)**
Neurozoom/Neurozoomsupplement
[ "license:unknown", "region:us" ]
2023-12-22T17:08:40+00:00
{"license": "unknown"}
2023-12-22T17:38:10+00:00
[]
[]
TAGS #license-unknown #region-us
NeuroZoom Reviews - Legit Brain Health Support Formula? Ingredients, Benefits and Price! NeuroZoom presents itself as an all-natural, safe-to-consume advanced formula designed to support healthy memory and concentration. Comprised of natural and effective components, this product claims to offer a wide range of benefits to cognitive functions. Dive into this concise NeuroZoom review to understand its potential advantages. 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 NeuroZoom Official Site Understanding NeuroZoom: NeuroZoom is promoted as a 35-in-1 memory essential formula aimed at supporting healthy memory, concentration, and mental acuity. How NeuroZoom Operates: This product is crafted to enhance memory and focus by addressing crucial elements that contribute to regular brain function. It contains carefully selected components to provide comprehensive assistance without the need for a prescription. NeuroZoom promotes synaptic transmission with neurotransmitters like dopamine, acetylcholine, serotonin, norepinephrine, GABA, and epinephrine. By optimizing neurotransmitter conditions, it enhances brain connectivity and provides the necessary chemical building blocks for the brain. Key Ingredients Inside NeuroZoom: NeuroZoom's ingredients are sourced from nature and aim to enhance mental performance. Some of the essential components include: Selenium: Supports mental health and regulates fundamental biological processes. Vitamin E: Known for its antioxidant properties, mitigating negative effects on the brain. Vitamin C: Reduces stress, anxiety, and depression symptoms, supporting the brain's defenses against damage. Riboflavin: Essential for energy production and maintaining healthy brain function. Niacin: Raises NAD+ levels, contributing to neuronal health and overall brain function. Vitamin B6: Reduces symptoms of depression, anxiety, and stress, enhancing cognitive functions. Thiamine: A potent antioxidant supporting amino acid production, blood flow, and brain function. Green Leaf Extract: Known for memory enhancement and mitigating age-related memory deterioration. Olive Leaf: Improves memory performance by regulating immune responses and stimulating CNS receptors. Chromium: Reduces brain fog, enhances memory, and aids in regaining focus. Biotin: Necessary for neurotransmitter production, promoting normal cognitive functions. 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 NeuroZoom Official Site How to Consume NeuroZoom: NeuroZoom is available in capsule form, with each bottle containing 60 pills for a 30-day supply. The recommended dosage is two capsules daily, preferably with meals or as directed by a healthcare professional. To maximize benefits, it's suggested to use NeuroZoom consistently for 3 to 6 months. NeuroZoom Benefits: 100% natural and effective formula. Real results within days. Supports brain restoration. Promotes a healthy and sharp memory. Repairs damage and enhances connectivity. Non-GMO, vegan, and gluten-free. Delivers rapid and incredible results. Pricing & Discounts: NeuroZoom offers exclusive discounts on its official website: Single Bottle: $79 + $19.95 for shipping. Three Bottles: $177 ($59 per bottle) + free shipping and two Ebook Bonuses. Six Bottles: $294 ($49 per bottle) + free shipping and two Ebook Bonuses. A 100% money-back guarantee is provided within 60 days of purchase, allowing for a risk-free trial. 
𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 NeuroZoom Official Site Final Thoughts: In conclusion, NeuroZoom emerges as a promising cognitive health solution with a patented formula of rare natural ingredients. With the potential to enhance memory and overall well-being, this product offers real results within a short period. It comes with a full 100% money-back guarantee, making it a risk-free investment. Don't miss the chance to improve your memory—get your NeuroZoom today! FAQs: How About NeuroZoom’s Additional Bonuses? For today only, order 6 bottles of NeuroZoom and receive 2 exclusive digital bonuses: Supercharge Your Body Biohacking Secrets Does NeuroZoom Offer Any Side Effects? NeuroZoom is free from harmful chemicals, artificial elements, and synthetic fillers. No side effects have been reported, making it a safe supplement for enhancing intelligence, energy, strength, and concentration. Are Added Ingredients Safe and Natural? Absolutely! NeuroZoom is crafted from the best plants and elements from the highest and purest places. Every ingredient is 100% natural, effective, and safe, carefully selected and added in perfect quantities to work synergistically. What If NeuroZoom Doesn’t Work For Me A 60-Day Money-Back Guarantee protects you. If NeuroZoom doesn’t work for you, a complete refund can be requested by sending the empty bottles within 60 days of purchase. Is NeuroZoom FDA Approved? While the FDA does not certify dietary supplement products, NeuroZoom is manufactured in an FDA-registered facility following GMP guidelines, ensuring its quality. Manufactured in the USA, NeuroZoom maintains high standards. Disclaimer: The views and opinions expressed in this sponsored article are those of the sponsor/author/agency and do not represent the stand and views of Mid-Day Group. Mid-Day Group disclaims any and all liability to any party, company, or product for any direct, indirect, implied, punitive, special, incidental, or consequential damages arising directly or indirectly from the use of this content. 𝐒𝐀𝐋𝐄 𝐈𝐒 𝐋𝐈𝐕𝐄 🇺🇸 𝐋𝐢𝐯𝐞 𝐒𝐚𝐥𝐞 𝐍𝐨𝐰 𝐎𝐟𝐟𝐞𝐫 𝐂𝐡𝐞𝐜𝐤 𝐍𝐨𝐰 NeuroZoom Official Site
[]
[ "TAGS\n#license-unknown #region-us \n" ]
[ 13 ]
[ "passage: TAGS\n#license-unknown #region-us \n" ]
269afe08ceda27a7ca03306d6d02d7a20e009e4e
# Dataset Card for "arc_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ibragim-bad/arc_easy
[ "region:us" ]
2023-12-22T17:17:59+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "choices", "struct": [{"name": "label", "sequence": "string"}, {"name": "text", "sequence": "string"}]}, {"name": "answerKey", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 657514, "num_examples": 2376}, {"name": "train", "num_bytes": 619000, "num_examples": 2251}, {"name": "validation", "num_bytes": 157394, "num_examples": 570}], "download_size": 763157, "dataset_size": 1433908}}
2023-12-22T17:18:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arc_easy" More Information needed
[ "# Dataset Card for \"arc_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arc_easy\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arc_easy\"\n\nMore Information needed" ]
e1fdc017572822537a2125ba161afab11e10bff9
# Dataset Card for "arc_challenge" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ibragim-bad/arc_challenge
[ "region:us" ]
2023-12-22T17:18:18+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "choices", "struct": [{"name": "label", "sequence": "string"}, {"name": "text", "sequence": "string"}]}, {"name": "answerKey", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 375511, "num_examples": 1172}, {"name": "train", "num_bytes": 349760, "num_examples": 1119}, {"name": "validation", "num_bytes": 96660, "num_examples": 299}], "download_size": 449682, "dataset_size": 821931}}
2023-12-22T17:18:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arc_challenge" More Information needed
[ "# Dataset Card for \"arc_challenge\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arc_challenge\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"arc_challenge\"\n\nMore Information needed" ]
033274f4c894885e4d25d0bfa2f84eff1a6f1dc4
# Dataset for CSC (Chinese Error Correction Dataset)

# Dataset Description

Chinese Spelling Correction (CSC) is a task to detect and correct misspelled characters in Chinese texts.

The dataset contains about 1.2 million examples in total, drawn from the following sources:

|Dataset|Corpus|Link|
|------|------|------|
|SIGHAN+Wang271K spelling correction dataset|SIGHAN+Wang271K (270k examples)|https://huggingface.co/datasets/shibing624/CSC|
|ECSpell spelling correction dataset|Covers the legal, medical, and financial domains|https://github.com/Aopolin-Lv/ECSpell|
|CGED grammatical error correction dataset|Only the 2016 and 2021 releases are included|https://github.com/wdimmy/Automatic-Corpus-Generation?spm=a2c22.12282016.0.0.5f3e7398w7SL4P|
|NLPCC error correction datasets|Includes both grammatical and spelling error correction|https://github.com/Arvid-pku/NLPCC2023_Shared_Task8 <br/>http://tcci.ccf.org.cn/conference/2023/dldoc/nacgec_training.zip<br/>http://tcci.ccf.org.cn/conference/2018/dldoc/trainingdata02.tar.gz|
|pycorrector grammar correction set|Chinese grammatical error correction data|https://github.com/shibing624/pycorrector/tree/llm/examples/data/grammar|

Further datasets can be found at:
- Chinese text correction dataset collection (Tianchi): https://tianchi.aliyun.com/dataset/138195
- NLPCC 2023 Chinese grammatical error correction dataset: http://tcci.ccf.org.cn/conference/2023/taskdata.php

# Languages

The data in CSC are in Chinese.

# Dataset Structure

An example from "train" looks as follows:
```json
{
    "conversations": [
      {"from":"human","value":"对这个句子纠错\n\n以后,我一直以来自学汉语了。"},
      {"from":"gpt","value":"从此以后,我就一直自学汉语了。"}
    ]
}
```

# Contributions

Compiled and uploaded by [Weaxs](https://github.com/Weaxs)
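# Loading the Data

A minimal loading sketch is shown below; the repository id `Weaxs/csc` and the single `train` split are assumptions based on this page, not guaranteed by the card.

```python
from datasets import load_dataset

# Repository id and split name are assumptions based on this page.
dataset = load_dataset("Weaxs/csc", split="train")

# Each record stores a two-turn conversation: the human turn carries the
# correction instruction plus the noisy sentence, the gpt turn the correction.
example = dataset[0]
source = example["conversations"][0]["value"]
target = example["conversations"][1]["value"]
print(source)
print(target)
```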
Weaxs/csc
[ "task_categories:text2text-generation", "size_categories:100M<n<1B", "language:zh", "license:apache-2.0", "chinese-spelling-check", "中文", "region:us" ]
2023-12-22T17:20:12+00:00
{"language": ["zh"], "license": "apache-2.0", "size_categories": ["100M<n<1B"], "task_categories": ["text2text-generation"], "tags": ["chinese-spelling-check", "\u4e2d\u6587"]}
2024-01-02T06:19:10+00:00
[]
[ "zh" ]
TAGS #task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #license-apache-2.0 #chinese-spelling-check #中文 #region-us
Dataset for CSC =============== Chinese error correction dataset Dataset Description =================== Chinese Spelling Correction (CSC) is a task to detect and correct misspelled characters in Chinese texts. About 1.2 million examples in total; the sources are listed below. Dataset: SIGHAN+Wang271K spelling correction dataset, Corpus: SIGHAN+Wang271K (270k examples), Link: URL Dataset: ECSpell spelling correction dataset, Corpus: covers the legal, medical, and financial domains, Link: URL Dataset: CGED grammatical error correction dataset, Corpus: only the 2016 and 2021 releases are included, Link: URL Dataset: NLPCC error correction datasets, Corpus: includes both grammatical and spelling error correction, Link: URL URL/URL Dataset: pycorrector grammar correction set, Corpus: Chinese grammatical error correction data, Link: URL Further datasets can be found at: * Chinese text correction dataset collection (Tianchi): URL * NLPCC 2023 Chinese grammatical error correction dataset: URL Languages ========= The data in CSC are in Chinese. Dataset Structure ================= An example from "train" looks as follows: Contributions ============= Compiled and uploaded by Weaxs
[]
[ "TAGS\n#task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #license-apache-2.0 #chinese-spelling-check #中文 #region-us \n" ]
[ 54 ]
[ "passage: TAGS\n#task_categories-text2text-generation #size_categories-100M<n<1B #language-Chinese #license-apache-2.0 #chinese-spelling-check #中文 #region-us \n" ]
18c037dca067b667307da1d304524698f6526c1c
# FLAN-Small

This repository is a reduced version of the data provided by the hard work behind: https://huggingface.co/datasets/imone/OpenOrca_FLAN.

FLAN-Small amounts to ~10m examples, sampled to approximately match FLAN's final "submix" of:

```
{
 'flan': 0.4,
 't0': 0.32,
 'niv2': 0.20,
 'cot': 0.05,
 'dialog': 0.03
}
```

Because the `cot` data is rather small, it was sampled with replacement; consequently there are some duplicates.

Some token-length statistics:

inputs:
```
{'min': 4, 'max': 176203, 'median': 215, '99_percentile': 1611, '75_percentile': 448, '90_percentile': 732}
```

targets:
```
{'min': 0, 'max': 71437, 'median': 7, '99_percentile': 266, '75_percentile': 30, '90_percentile': 67}
```
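For readers who want to sanity-check these numbers, a rough sketch is shown below. The `inputs` column name and the GPT-2 tokenizer are assumptions — the card does not state the column schema or which tokenizer produced the statistics above.

```python
import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer

# Stream to avoid materializing ~10m examples; column name is an assumption.
ds = load_dataset("BadDepartment/FLAN-Small", split="train", streaming=True)
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # tokenizer choice is an assumption

lengths = []
for i, example in enumerate(ds):
    lengths.append(len(tokenizer(example["inputs"])["input_ids"]))
    if i >= 10_000:  # sample a prefix instead of scanning the full corpus
        break

print({
    "min": int(np.min(lengths)),
    "median": float(np.median(lengths)),
    "99_percentile": float(np.percentile(lengths, 99)),
})
```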
BadDepartment/FLAN-Small
[ "region:us" ]
2023-12-22T17:23:10+00:00
{}
2023-12-28T02:18:38+00:00
[]
[]
TAGS #region-us
# FLAN-Small This repository is a reduced version of the data provided by the hard work behind: URL FLAN-Small amounts to ~10m examples, sampled to approximately match FLAN's final "submix" of: Because the 'cot' data is rather small, it was sampled with replacement; consequently there are some duplicates. Some token-length statistics: inputs: targets:
[ "# FLAN-Small\n\nThis repository is a reduced version of the data provided by the hardwork of: URL\n\nFLAN-Small amounts to ~10m examples sampled to approximately hold to the FLAN's final \"submix\" of:\n\n\n\nSince the 'cot' data is rather small -- this was sampled with replacement; consequently there are some duplicates.\n\nSome token length statistics:\n\ninputs: \n\n\ntargets:" ]
[ "TAGS\n#region-us \n", "# FLAN-Small\n\nThis repository is a reduced version of the data provided by the hardwork of: URL\n\nFLAN-Small amounts to ~10m examples sampled to approximately hold to the FLAN's final \"submix\" of:\n\n\n\nSince the 'cot' data is rather small -- this was sampled with replacement; consequently there are some duplicates.\n\nSome token length statistics:\n\ninputs: \n\n\ntargets:" ]
[ 6, 96 ]
[ "passage: TAGS\n#region-us \n# FLAN-Small\n\nThis repository is a reduced version of the data provided by the hardwork of: URL\n\nFLAN-Small amounts to ~10m examples sampled to approximately hold to the FLAN's final \"submix\" of:\n\n\n\nSince the 'cot' data is rather small -- this was sampled with replacement; consequently there are some duplicates.\n\nSome token length statistics:\n\ninputs: \n\n\ntargets:" ]
e6ae5146998c0c6eac3206ebb9b3fbc72cf3564b
# MetaMath

A dataset of `{"question": ..., "answer": ...}` pairs derived from MetaMathQA.

# 💻 Dataset Usage

Run the following command to load the data:
```python
from datasets import load_dataset

dataset = load_dataset("shuyuej/MetaMathQA")
dataset = dataset['train']
print(dataset)
```

# 📝 Dataset modification code

```python
# coding=utf-8
import jsonlines
from datasets import load_dataset

# Load the source dataset
dataset = load_dataset("meta-math/MetaMathQA")
dataset = dataset["train"]

data = []
for example in dataset:
    question = example['query']
    answer = example['response']
    data.append({"question": question, "answer": answer})

# Save the modified data to a jsonl file
output_file = 'MetaMathQA.jsonl'
with jsonlines.open(output_file, 'w') as writer:
    writer.write_all(data)

print(f"Modified data saved to {output_file}")
```
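If the pairs are used for supervised fine-tuning, they can be flattened into single training strings; the template below is purely illustrative and not a format prescribed by this dataset.

```python
from datasets import load_dataset

dataset = load_dataset("shuyuej/MetaMathQA", split="train")

def to_text(example):
    # Illustrative instruction template; adjust to your fine-tuning setup.
    return {"text": f"Question: {example['question']}\nAnswer: {example['answer']}"}

dataset = dataset.map(to_text)
print(dataset[0]["text"][:200])
```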
shuyuej/MetaMathQA
[ "license:apache-2.0", "region:us" ]
2023-12-22T17:28:56+00:00
{"license": "apache-2.0"}
2024-01-25T19:44:12+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
# MetaMath Dataset with "{"question": question, "answer": answer}" pairs. # Dataset Usage Run the following command to load the data: # Dataset modification codes
[ "# MetaMath Dataset with \"{\"question\": question, \"answer\": answer}\" pairs.", "# Dataset Usage\nRun the following command to load the data:", "# Dataset modification codes" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# MetaMath Dataset with \"{\"question\": question, \"answer\": answer}\" pairs.", "# Dataset Usage\nRun the following command to load the data:", "# Dataset modification codes" ]
[ 14, 26, 14, 6 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# MetaMath Dataset with \"{\"question\": question, \"answer\": answer}\" pairs.# Dataset Usage\nRun the following command to load the data:# Dataset modification codes" ]
31f6b430634d477ac4f73eb36cf684d0c94b112b
# Dataset Card for "rmh_subset_large2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thorirhrafn/rmh_subset_large2
[ "region:us" ]
2023-12-22T18:41:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1483105963, "num_examples": 564320}, {"name": "test", "num_bytes": 24682625, "num_examples": 10000}, {"name": "valid", "num_bytes": 4195960, "num_examples": 2000}], "download_size": 923801611, "dataset_size": 1511984548}}
2023-12-22T18:42:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rmh_subset_large2" More Information needed
[ "# Dataset Card for \"rmh_subset_large2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rmh_subset_large2\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rmh_subset_large2\"\n\nMore Information needed" ]
852f489ab3f943c042156eb5714c591c8681043b
# Dataset Card for "wikipedia_id_20231201" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
teddy-f-47/wikipedia_id_20231201
[ "region:us" ]
2023-12-22T19:05:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1133503236, "num_examples": 669160}], "download_size": 587745555, "dataset_size": 1133503236}}
2023-12-22T19:06:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia_id_20231201" More Information needed
[ "# Dataset Card for \"wikipedia_id_20231201\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia_id_20231201\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia_id_20231201\"\n\nMore Information needed" ]
e6846c0d2b60cf5029e827160042218a914d88cb
# Dataset Card for "glaive_de_raw_750" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jphme/glaive_de_raw_750
[ "region:us" ]
2023-12-22T19:07:58+00:00
{"dataset_info": {"features": [{"name": "chat_until_call", "dtype": "string"}, {"name": "chat_after_response", "dtype": "string"}, {"name": "sample_id", "dtype": "string"}, {"name": "conversations_pre", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "conversations_post", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1328601, "num_examples": 676}], "download_size": 634645, "dataset_size": 1328601}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-22T19:08:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "glaive_de_raw_750" More Information needed
[ "# Dataset Card for \"glaive_de_raw_750\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"glaive_de_raw_750\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"glaive_de_raw_750\"\n\nMore Information needed" ]
180b26104ae4ca1c20b518149bb445584178470a
# TMMLU+ : Large scale traditional chinese massive multitask language understanding <p align="center"> <img src="https://huggingface.co/datasets/ikala/tmmluplus/resolve/main/cover.png" alt="A close-up image of a neat paper note with a white background. The text 'TMMLU+' is written horizontally across the center of the note in bold, black. Join us to work in multimodal LLM : https://ikala.ai/recruit/" style="max-width: 400" width=400 /> </p> We present TMMLU+, a traditional Chinese massive multitask language understanding dataset. TMMLU+ is a multiple-choice question-answering dataset featuring 66 subjects, ranging from elementary to professional level. The TMMLU+ dataset is six times larger and contains more balanced subjects compared to its predecessor, [TMMLU](https://github.com/mtkresearch/MR-Models/tree/main/TC-Eval/data/TMMLU). We have included benchmark results in TMMLU+ from closed-source models and 20 open-weight Chinese large language models, with parameters ranging from 1.8B to 72B. The benchmark results show that Traditional Chinese variants still lag behind those trained on major Simplified Chinese models. ```python from datasets import load_dataset task_list = [ 'engineering_math', 'dentistry', 'traditional_chinese_medicine_clinical_medicine', 'clinical_psychology', 'technical', 'culinary_skills', 'mechanical', 'logic_reasoning', 'real_estate', 'general_principles_of_law', 'finance_banking', 'anti_money_laundering', 'ttqav2', 'marketing_management', 'business_management', 'organic_chemistry', 'advance_chemistry', 'physics', 'secondary_physics', 'human_behavior', 'national_protection', 'jce_humanities', 'politic_science', 'agriculture', 'official_document_management', 'financial_analysis', 'pharmacy', 'educational_psychology', 'statistics_and_machine_learning', 'management_accounting', 'introduction_to_law', 'computer_science', 'veterinary_pathology', 'accounting', 'fire_science', 'optometry', 'insurance_studies', 'pharmacology', 'taxation', 'trust_practice', 'geography_of_taiwan', 'physical_education', 'auditing', 'administrative_law', 'education_(profession_level)', 'economics', 'veterinary_pharmacology', 'nautical_science', 'occupational_therapy_for_psychological_disorders', 'basic_medical_science', 'macroeconomics', 'trade', 'chinese_language_and_literature', 'tve_design', 'junior_science_exam', 'junior_math_exam', 'junior_chinese_exam', 'junior_social_studies', 'tve_mathematics', 'tve_chinese_language', 'tve_natural_sciences', 'junior_chemistry', 'music', 'education', 'three_principles_of_people', 'taiwanese_hokkien' ] for task in task_list: val = load_dataset('ikala/tmmluplus', task)['validation'] dev = load_dataset('ikala/tmmluplus', task)['train'] test = load_dataset('ikala/tmmluplus', task)['test'] ``` For each dataset split ```python for row in test: print(row) break >> Dataset({ features: ['question', 'A', 'B', 'C', 'D', 'answer'], num_rows: 11 }) ``` Statistic on all four categories : STEM, Social Science, Humanities, Other | Category | Test | Dev | Validation | |----------------------------------|-------|------|------------| | STEM | 3458 | 70 | 385 | | Social Sciences | 5958 | 90 | 665 | | Humanities | 1763 | 35 | 197 | | Other (Business, Health, Misc.) 
| 8939 | 135 | 995 | | **Total** | 20118 | 330 | 2242 | ## Benchmark on direct prompting | model | STEM | Social Science | Humanities | Other | Average | |------------|------------|------------|------------|------------|------------| | [Qwen/Qwen-72B](https://huggingface.co/Qwen/Qwen-72B) | 61.12 | 71.65 | 63.00 | 61.31 |64.27| | gpt-4-0613 | 60.36 | 67.36 | 56.03 | 57.62 |60.34| | [Qwen/Qwen-72B-Chat](https://huggingface.co/Qwen/Qwen-72B-Chat) | 55.15 | 66.20 | 55.65 | 57.19 |58.55| | [Qwen/Qwen-14B](https://huggingface.co/Qwen/Qwen-14B) | 46.94 | 56.69 | 49.43 | 48.81 |50.47| | Gemini-pro | 45.38 | 57.29 | 48.80 | 48.21 |49.92| | [01-ai/Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) | 40.24 | 56.77 | 53.99 | 47.58 |49.64| | [Qwen/Qwen-14B-Chat](https://huggingface.co/Qwen/Qwen-14B-Chat) | 43.86 | 53.29 | 44.78 | 45.13 |46.77| | [01-ai/Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) | 39.62 | 50.24 | 44.44 | 44.26 |44.64| | Claude-1.3 | 42.65 | 49.33 | 42.16 | 44.14 |44.57| | gpt-3.5-turbo-0613 | 41.56 | 46.72 | 36.73 | 42.03 |41.76| | [CausalLM/14B](https://huggingface.co/CausalLM/14B) | 39.83 | 44.50 | 39.61 | 41.97 |41.48| | [Skywork/Skywork-13B-base](https://huggingface.co/Skywork/Skywork-13B-base) | 36.93 | 47.27 | 41.04 | 40.10 |41.33| | [Qwen/Qwen-7B](https://huggingface.co/Qwen/Qwen-7B) | 37.53 | 45.48 | 38.09 | 38.96 |40.01| | [Qwen/Qwen-7B-Chat](https://huggingface.co/Qwen/Qwen-7B-Chat) | 33.32 | 44.64 | 40.27 | 39.89 |39.53| | [vivo-ai/BlueLM-7B-Base](https://huggingface.co/vivo-ai/BlueLM-7B-Base) | 33.94 | 41.52 | 37.38 | 38.74 |37.90| | [baichuan-inc/Baichuan2-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat) | 29.64 | 43.73 | 37.36 | 39.88 |37.65| | [Qwen/Qwen-1_8B](https://huggingface.co/Qwen/Qwen-1_8B) | 32.65 | 38.95 | 38.34 | 35.27 |36.30| | Claude-2 | 39.65 | 39.09 | 28.59 | 37.47 |36.20| | [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) | 31.05 | 39.31 | 35.64 | 35.60 |35.40| | [deepseek-ai/deepseek-llm-7b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-7b-chat) | 29.82 | 42.29 | 34.24 | 34.31 |35.17| | [CausalLM/7B](https://huggingface.co/CausalLM/7B) | 31.03 | 38.17 | 35.87 | 35.39 |35.11| | [Azure99/blossom-v3_1-mistral-7b](https://huggingface.co/Azure99/blossom-v3_1-mistral-7b) | 32.80 | 36.91 | 32.36 | 34.53 |34.15| | [microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) | 24.69 | 39.18 | 33.60 | 31.99 |32.37| | [Qwen/Qwen-1_8B-Chat](https://huggingface.co/Qwen/Qwen-1_8B-Chat) | 26.60 | 36.36 | 31.81 | 31.96 |31.68| | [TigerResearch/tigerbot-13b-chat-v3](https://huggingface.co/TigerResearch/tigerbot-13b-chat-v3) | 24.73 | 29.63 | 25.72 | 27.22 |26.82| | [hongyin/mistral-7b-80k](https://huggingface.co/hongyin/mistral-7b-80k) | 24.26 | 23.76 | 22.56 | 24.57 |23.79| | [deepseek-ai/deepseek-llm-67b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat) | 19.10 | 26.06 | 21.51 | 21.77 |22.11| | [yentinglin/Taiwan-LLM-13B-v2.0-chat](https://huggingface.co/yentinglin/Taiwan-LLM-13B-v2.0-chat) | 18.53 | 27.65 | 17.77 | 21.49 |21.36| | [GeneZC/MiniChat-3B](https://huggingface.co/GeneZC/MiniChat-3B) | 17.66 | 23.35 | 22.71 | 20.34 |21.02| | [LinkSoul/Chinese-Llama-2-7b](https://huggingface.co/LinkSoul/Chinese-Llama-2-7b) | 16.55 | 18.39 | 12.97 | 16.13 |16.01| | [yentinglin/Taiwan-LLM-7B-v2.1-chat](https://huggingface.co/yentinglin/Taiwan-LLM-7B-v2.1-chat) | 14.99 | 16.23 | 15.00 | 16.22 |15.61| | Claude-instant-1 | 12.52 | 17.13 | 15.10 | 13.57 |14.58| | 
[FlagAlpha/Atom-7B](https://huggingface.co/FlagAlpha/Atom-7B) | 5.60 | 13.57 | 7.71 | 11.84 |9.68| Results via [ievals](https://github.com/iKala/ievals) ( settings : 0-shot direct answering ) # Citation ``` @article{ikala2023eval, title={An Improved Traditional Chinese Evaluation Suite for Foundation Model}, author={Tam, Zhi-Rui and Pai, Ya-Ting}, journal={arXiv}, year={2023} } ```
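For reference, a sketch of how a row can be rendered into a 0-shot multiple-choice prompt is shown below; the prompt template is illustrative and not necessarily the one used by ievals.

```python
from datasets import load_dataset

# Any of the 66 subjects works here; geography_of_taiwan is just an example.
test = load_dataset("ikala/tmmluplus", "geography_of_taiwan", split="test")

def format_row(row):
    # Each row has the features: question, A, B, C, D, answer.
    options = "\n".join(f"{choice}. {row[choice]}" for choice in "ABCD")
    return f"{row['question']}\n{options}\n答案:"

prompt = format_row(test[0])   # feed this to the model under evaluation
gold = test[0]["answer"]       # one of A/B/C/D, used to score accuracy
print(prompt)
print(gold)
```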
ikala/tmmluplus
[ "task_categories:question-answering", "size_categories:100K<n<1M", "language:zh", "license:other", "traditional chinese", "finance", "medical", "taiwan", "benchmark", "zh-tw", "zh-hant", "region:us" ]
2023-12-22T19:12:13+00:00
{"language": ["zh"], "license": "other", "size_categories": ["100K<n<1M"], "task_categories": ["question-answering"], "pretty_name": "tmmlu++", "license_name": "creative-commons-by-nc", "tags": ["traditional chinese", "finance", "medical", "taiwan", "benchmark", "zh-tw", "zh-hant"], "configs": [{"config_name": "engineering_math", "datafiles": [{"split": "train", "path": "data/engineering_math_dev.csv"}, {"split": "validation", "path": "data/engineering_math_val.csv"}, {"split": "test", "path": "data/engineering_math_test.csv"}]}, {"config_name": "dentistry", "datafiles": [{"split": "train", "path": "data/dentistry_dev.csv"}, {"split": "validation", "path": "data/dentistry_val.csv"}, {"split": "test", "path": "data/dentistry_test.csv"}]}, {"config_name": "traditional_chinese_medicine_clinical_medicine", "datafiles": [{"split": "train", "path": "data/traditional_chinese_medicine_clinical_medicine_dev.csv"}, {"split": "validation", "path": "data/traditional_chinese_medicine_clinical_medicine_val.csv"}, {"split": "test", "path": "data/traditional_chinese_medicine_clinical_medicine_test.csv"}]}, {"config_name": "clinical_psychology", "datafiles": [{"split": "train", "path": "data/clinical_psychology_dev.csv"}, {"split": "validation", "path": "data/clinical_psychology_val.csv"}, {"split": "test", "path": "data/clinical_psychology_test.csv"}]}, {"config_name": "technical", "datafiles": [{"split": "train", "path": "data/technical_dev.csv"}, {"split": "validation", "path": "data/technical_val.csv"}, {"split": "test", "path": "data/technical_test.csv"}]}, {"config_name": "culinary_skills", "datafiles": [{"split": "train", "path": "data/culinary_skills_dev.csv"}, {"split": "validation", "path": "data/culinary_skills_val.csv"}, {"split": "test", "path": "data/culinary_skills_test.csv"}]}, {"config_name": "mechanical", "datafiles": [{"split": "train", "path": "data/mechanical_dev.csv"}, {"split": "validation", "path": "data/mechanical_val.csv"}, {"split": "test", "path": "data/mechanical_test.csv"}]}, {"config_name": "logic_reasoning", "datafiles": [{"split": "train", "path": "data/logic_reasoning_dev.csv"}, {"split": "validation", "path": "data/logic_reasoning_val.csv"}, {"split": "test", "path": "data/logic_reasoning_test.csv"}]}, {"config_name": "real_estate", "datafiles": [{"split": "train", "path": "data/real_estate_dev.csv"}, {"split": "validation", "path": "data/real_estate_val.csv"}, {"split": "test", "path": "data/real_estate_test.csv"}]}, {"config_name": "general_principles_of_law", "datafiles": [{"split": "train", "path": "data/general_principles_of_law_dev.csv"}, {"split": "validation", "path": "data/general_principles_of_law_val.csv"}, {"split": "test", "path": "data/general_principles_of_law_test.csv"}]}, {"config_name": "finance_banking", "datafiles": [{"split": "train", "path": "data/finance_banking_dev.csv"}, {"split": "validation", "path": "data/finance_banking_val.csv"}, {"split": "test", "path": "data/finance_banking_test.csv"}]}, {"config_name": "anti_money_laundering", "datafiles": [{"split": "train", "path": "data/anti_money_laundering_dev.csv"}, {"split": "validation", "path": "data/anti_money_laundering_val.csv"}, {"split": "test", "path": "data/anti_money_laundering_test.csv"}]}, {"config_name": "ttqav2", "datafiles": [{"split": "train", "path": "data/ttqav2_dev.csv"}, {"split": "validation", "path": "data/ttqav2_val.csv"}, {"split": "test", "path": "data/ttqav2_test.csv"}]}, {"config_name": "marketing_management", "datafiles": [{"split": "train", "path": 
"data/marketing_management_dev.csv"}, {"split": "validation", "path": "data/marketing_management_val.csv"}, {"split": "test", "path": "data/marketing_management_test.csv"}]}, {"config_name": "business_management", "datafiles": [{"split": "train", "path": "data/business_management_dev.csv"}, {"split": "validation", "path": "data/business_management_val.csv"}, {"split": "test", "path": "data/business_management_test.csv"}]}, {"config_name": "organic_chemistry", "datafiles": [{"split": "train", "path": "data/organic_chemistry_dev.csv"}, {"split": "validation", "path": "data/organic_chemistry_val.csv"}, {"split": "test", "path": "data/organic_chemistry_test.csv"}]}, {"config_name": "advance_chemistry", "datafiles": [{"split": "train", "path": "data/advance_chemistry_dev.csv"}, {"split": "validation", "path": "data/advance_chemistry_val.csv"}, {"split": "test", "path": "data/advance_chemistry_test.csv"}]}, {"config_name": "physics", "datafiles": [{"split": "train", "path": "data/physics_dev.csv"}, {"split": "validation", "path": "data/physics_val.csv"}, {"split": "test", "path": "data/physics_test.csv"}]}, {"config_name": "secondary_physics", "datafiles": [{"split": "train", "path": "data/secondary_physics_dev.csv"}, {"split": "validation", "path": "data/secondary_physics_val.csv"}, {"split": "test", "path": "data/secondary_physics_test.csv"}]}, {"config_name": "human_behavior", "datafiles": [{"split": "train", "path": "data/human_behavior_dev.csv"}, {"split": "validation", "path": "data/human_behavior_val.csv"}, {"split": "test", "path": "data/human_behavior_test.csv"}]}, {"config_name": "national_protection", "datafiles": [{"split": "train", "path": "data/national_protection_dev.csv"}, {"split": "validation", "path": "data/national_protection_val.csv"}, {"split": "test", "path": "data/national_protection_test.csv"}]}, {"config_name": "jce_humanities", "datafiles": [{"split": "train", "path": "data/jce_humanities_dev.csv"}, {"split": "validation", "path": "data/jce_humanities_val.csv"}, {"split": "test", "path": "data/jce_humanities_test.csv"}]}, {"config_name": "politic_science", "datafiles": [{"split": "train", "path": "data/politic_science_dev.csv"}, {"split": "validation", "path": "data/politic_science_val.csv"}, {"split": "test", "path": "data/politic_science_test.csv"}]}, {"config_name": "agriculture", "datafiles": [{"split": "train", "path": "data/agriculture_dev.csv"}, {"split": "validation", "path": "data/agriculture_val.csv"}, {"split": "test", "path": "data/agriculture_test.csv"}]}, {"config_name": "official_document_management", "datafiles": [{"split": "train", "path": "data/official_document_management_dev.csv"}, {"split": "validation", "path": "data/official_document_management_val.csv"}, {"split": "test", "path": "data/official_document_management_test.csv"}]}, {"config_name": "financial_analysis", "datafiles": [{"split": "train", "path": "data/financial_analysis_dev.csv"}, {"split": "validation", "path": "data/financial_analysis_val.csv"}, {"split": "test", "path": "data/financial_analysis_test.csv"}]}, {"config_name": "pharmacy", "datafiles": [{"split": "train", "path": "data/pharmacy_dev.csv"}, {"split": "validation", "path": "data/pharmacy_val.csv"}, {"split": "test", "path": "data/pharmacy_test.csv"}]}, {"config_name": "educational_psychology", "datafiles": [{"split": "train", "path": "data/educational_psychology_dev.csv"}, {"split": "validation", "path": "data/educational_psychology_val.csv"}, {"split": "test", "path": "data/educational_psychology_test.csv"}]}, 
{"config_name": "statistics_and_machine_learning", "datafiles": [{"split": "train", "path": "data/statistics_and_machine_learning_dev.csv"}, {"split": "validation", "path": "data/statistics_and_machine_learning_val.csv"}, {"split": "test", "path": "data/statistics_and_machine_learning_test.csv"}]}, {"config_name": "management_accounting", "datafiles": [{"split": "train", "path": "data/management_accounting_dev.csv"}, {"split": "validation", "path": "data/management_accounting_val.csv"}, {"split": "test", "path": "data/management_accounting_test.csv"}]}, {"config_name": "introduction_to_law", "datafiles": [{"split": "train", "path": "data/introduction_to_law_dev.csv"}, {"split": "validation", "path": "data/introduction_to_law_val.csv"}, {"split": "test", "path": "data/introduction_to_law_test.csv"}]}, {"config_name": "computer_science", "datafiles": [{"split": "train", "path": "data/computer_science_dev.csv"}, {"split": "validation", "path": "data/computer_science_val.csv"}, {"split": "test", "path": "data/computer_science_test.csv"}]}, {"config_name": "veterinary_pathology", "datafiles": [{"split": "train", "path": "data/veterinary_pathology_dev.csv"}, {"split": "validation", "path": "data/veterinary_pathology_val.csv"}, {"split": "test", "path": "data/veterinary_pathology_test.csv"}]}, {"config_name": "accounting", "datafiles": [{"split": "train", "path": "data/accounting_dev.csv"}, {"split": "validation", "path": "data/accounting_val.csv"}, {"split": "test", "path": "data/accounting_test.csv"}]}, {"config_name": "fire_science", "datafiles": [{"split": "train", "path": "data/fire_science_dev.csv"}, {"split": "validation", "path": "data/fire_science_val.csv"}, {"split": "test", "path": "data/fire_science_test.csv"}]}, {"config_name": "optometry", "datafiles": [{"split": "train", "path": "data/optometry_dev.csv"}, {"split": "validation", "path": "data/optometry_val.csv"}, {"split": "test", "path": "data/optometry_test.csv"}]}, {"config_name": "insurance_studies", "datafiles": [{"split": "train", "path": "data/insurance_studies_dev.csv"}, {"split": "validation", "path": "data/insurance_studies_val.csv"}, {"split": "test", "path": "data/insurance_studies_test.csv"}]}, {"config_name": "pharmacology", "datafiles": [{"split": "train", "path": "data/pharmacology_dev.csv"}, {"split": "validation", "path": "data/pharmacology_val.csv"}, {"split": "test", "path": "data/pharmacology_test.csv"}]}, {"config_name": "taxation", "datafiles": [{"split": "train", "path": "data/taxation_dev.csv"}, {"split": "validation", "path": "data/taxation_val.csv"}, {"split": "test", "path": "data/taxation_test.csv"}]}, {"config_name": "trust_practice", "datafiles": [{"split": "train", "path": "data/trust_practice_dev.csv"}, {"split": "validation", "path": "data/trust_practice_val.csv"}, {"split": "test", "path": "data/trust_practice_test.csv"}]}, {"config_name": "geography_of_taiwan", "datafiles": [{"split": "train", "path": "data/geography_of_taiwan_dev.csv"}, {"split": "validation", "path": "data/geography_of_taiwan_val.csv"}, {"split": "test", "path": "data/geography_of_taiwan_test.csv"}]}, {"config_name": "physical_education", "datafiles": [{"split": "train", "path": "data/physical_education_dev.csv"}, {"split": "validation", "path": "data/physical_education_val.csv"}, {"split": "test", "path": "data/physical_education_test.csv"}]}, {"config_name": "auditing", "datafiles": [{"split": "train", "path": "data/auditing_dev.csv"}, {"split": "validation", "path": "data/auditing_val.csv"}, {"split": "test", "path": 
"data/auditing_test.csv"}]}, {"config_name": "administrative_law", "datafiles": [{"split": "train", "path": "data/administrative_law_dev.csv"}, {"split": "validation", "path": "data/administrative_law_val.csv"}, {"split": "test", "path": "data/administrative_law_test.csv"}]}, {"config_name": "education_(profession_level)", "datafiles": [{"split": "train", "path": "data/education_(profession_level)_dev.csv"}, {"split": "validation", "path": "data/education_(profession_level)_val.csv"}, {"split": "test", "path": "data/education_(profession_level)_test.csv"}]}, {"config_name": "economics", "datafiles": [{"split": "train", "path": "data/economics_dev.csv"}, {"split": "validation", "path": "data/economics_val.csv"}, {"split": "test", "path": "data/economics_test.csv"}]}, {"config_name": "veterinary_pharmacology", "datafiles": [{"split": "train", "path": "data/veterinary_pharmacology_dev.csv"}, {"split": "validation", "path": "data/veterinary_pharmacology_val.csv"}, {"split": "test", "path": "data/veterinary_pharmacology_test.csv"}]}, {"config_name": "nautical_science", "datafiles": [{"split": "train", "path": "data/nautical_science_dev.csv"}, {"split": "validation", "path": "data/nautical_science_val.csv"}, {"split": "test", "path": "data/nautical_science_test.csv"}]}, {"config_name": "occupational_therapy_for_psychological_disorders", "datafiles": [{"split": "train", "path": "data/occupational_therapy_for_psychological_disorders_dev.csv"}, {"split": "validation", "path": "data/occupational_therapy_for_psychological_disorders_val.csv"}, {"split": "test", "path": "data/occupational_therapy_for_psychological_disorders_test.csv"}]}, {"config_name": "basic_medical_science", "datafiles": [{"split": "train", "path": "data/basic_medical_science_dev.csv"}, {"split": "validation", "path": "data/basic_medical_science_val.csv"}, {"split": "test", "path": "data/basic_medical_science_test.csv"}]}, {"config_name": "macroeconomics", "datafiles": [{"split": "train", "path": "data/macroeconomics_dev.csv"}, {"split": "validation", "path": "data/macroeconomics_val.csv"}, {"split": "test", "path": "data/macroeconomics_test.csv"}]}, {"config_name": "trade", "datafiles": [{"split": "train", "path": "data/trade_dev.csv"}, {"split": "validation", "path": "data/trade_val.csv"}, {"split": "test", "path": "data/trade_test.csv"}]}, {"config_name": "chinese_language_and_literature", "datafiles": [{"split": "train", "path": "data/chinese_language_and_literature_dev.csv"}, {"split": "validation", "path": "data/chinese_language_and_literature_val.csv"}, {"split": "test", "path": "data/chinese_language_and_literature_test.csv"}]}, {"config_name": "tve_design", "datafiles": [{"split": "train", "path": "data/tve_design_dev.csv"}, {"split": "validation", "path": "data/tve_design_val.csv"}, {"split": "test", "path": "data/tve_design_test.csv"}]}, {"config_name": "junior_science_exam", "datafiles": [{"split": "train", "path": "data/junior_science_exam_dev.csv"}, {"split": "validation", "path": "data/junior_science_exam_val.csv"}, {"split": "test", "path": "data/junior_science_exam_test.csv"}]}, {"config_name": "junior_math_exam", "datafiles": [{"split": "train", "path": "data/junior_math_exam_dev.csv"}, {"split": "validation", "path": "data/junior_math_exam_val.csv"}, {"split": "test", "path": "data/junior_math_exam_test.csv"}]}, {"config_name": "junior_chinese_exam", "datafiles": [{"split": "train", "path": "data/junior_chinese_exam_dev.csv"}, {"split": "validation", "path": "data/junior_chinese_exam_val.csv"}, {"split": "test", 
"path": "data/junior_chinese_exam_test.csv"}]}, {"config_name": "junior_social_studies", "datafiles": [{"split": "train", "path": "data/junior_social_studies_dev.csv"}, {"split": "validation", "path": "data/junior_social_studies_val.csv"}, {"split": "test", "path": "data/junior_social_studies_test.csv"}]}, {"config_name": "tve_mathematics", "datafiles": [{"split": "train", "path": "data/tve_mathematics_dev.csv"}, {"split": "validation", "path": "data/tve_mathematics_val.csv"}, {"split": "test", "path": "data/tve_mathematics_test.csv"}]}, {"config_name": "tve_chinese_language", "datafiles": [{"split": "train", "path": "data/tve_chinese_language_dev.csv"}, {"split": "validation", "path": "data/tve_chinese_language_val.csv"}, {"split": "test", "path": "data/tve_chinese_language_test.csv"}]}, {"config_name": "tve_natural_sciences", "datafiles": [{"split": "train", "path": "data/tve_natural_sciences_dev.csv"}, {"split": "validation", "path": "data/tve_natural_sciences_val.csv"}, {"split": "test", "path": "data/tve_natural_sciences_test.csv"}]}, {"config_name": "junior_chemistry", "datafiles": [{"split": "train", "path": "data/junior_chemistry_dev.csv"}, {"split": "validation", "path": "data/junior_chemistry_val.csv"}, {"split": "test", "path": "data/junior_chemistry_test.csv"}]}, {"config_name": "music", "datafiles": [{"split": "train", "path": "data/music_dev.csv"}, {"split": "validation", "path": "data/music_val.csv"}, {"split": "test", "path": "data/music_test.csv"}]}, {"config_name": "education", "datafiles": [{"split": "train", "path": "data/education_dev.csv"}, {"split": "validation", "path": "data/education_val.csv"}, {"split": "test", "path": "data/education_test.csv"}]}, {"config_name": "three_principles_of_people", "datafiles": [{"split": "train", "path": "data/three_principles_of_people_dev.csv"}, {"split": "validation", "path": "data/three_principles_of_people_val.csv"}, {"split": "test", "path": "data/three_principles_of_people_test.csv"}]}, {"config_name": "taiwanese_hokkien", "datafiles": [{"split": "train", "path": "data/taiwanese_hokkien_dev.csv"}, {"split": "validation", "path": "data/taiwanese_hokkien_val.csv"}, {"split": "test", "path": "data/taiwanese_hokkien_test.csv"}]}]}
2023-12-28T10:02:05+00:00
[]
[ "zh" ]
TAGS #task_categories-question-answering #size_categories-100K<n<1M #language-Chinese #license-other #traditional chinese #finance #medical #taiwan #benchmark #zh-tw #zh-hant #region-us
TMMLU+ : Large scale traditional chinese massive multitask language understanding ================================================================================= We present TMMLU+, a traditional Chinese massive multitask language understanding dataset. TMMLU+ is a multiple-choice question-answering dataset featuring 66 subjects, ranging from elementary to professional level. The TMMLU+ dataset is six times larger and contains more balanced subjects compared to its predecessor, TMMLU. We have included benchmark results in TMMLU+ from closed-source models and 20 open-weight Chinese large language models, with parameters ranging from 1.8B to 72B. The benchmark results show that Traditional Chinese variants still lag behind those trained on major Simplified Chinese models. For each dataset split Statistic on all four categories : STEM, Social Science, Humanities, Other Benchmark on direct prompting ----------------------------- Results via ievals ( settings : 0-shot direct answering )
[]
[ "TAGS\n#task_categories-question-answering #size_categories-100K<n<1M #language-Chinese #license-other #traditional chinese #finance #medical #taiwan #benchmark #zh-tw #zh-hant #region-us \n" ]
[ 66 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-100K<n<1M #language-Chinese #license-other #traditional chinese #finance #medical #taiwan #benchmark #zh-tw #zh-hant #region-us \n" ]
e048369ea188c11ad0de19bf258b5697ae961107
# HisGermaNER: NER Datasets for Historical German

<img src="https://huggingface.co/datasets/stefan-it/HisGermaNER/resolve/main/assets/logo.jpeg" width="500" height="500" />

In this repository we release another NER dataset from historical German newspapers.

## Newspaper corpus

In the first release of our dataset, we select 11 newspapers from 1710 to 1840 from the Austrian National Library (ONB), resulting in 100 pages:

| Year | ONB ID | Newspaper | URL | Pages |
| ---- | ------------------ | -------------------------------- | ------------------------------------------------------------------------ | ----- |
| 1720 | `ONB_wrz_17200511` | Wiener Zeitung | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=wrz&datum=17200511) | 10 |
| 1730 | `ONB_wrz_17300603` | Wiener Zeitung | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=wrz&datum=17300603) | 14 |
| 1740 | `ONB_wrz_17401109` | Wiener Zeitung | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=wrz&datum=17401109) | 12 |
| 1770 | `ONB_rpr_17700517` | Reichspostreuter | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=rpr&datum=17700517) | 4 |
| 1780 | `ONB_wrz_17800701` | Wiener Zeitung | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=wrz&datum=17800701) | 24 |
| 1790 | `ONB_pre_17901030` | Preßburger Zeitung | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=pre&datum=17901030) | 12 |
| 1800 | `ONB_ibs_18000322` | Intelligenzblatt von Salzburg | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=ibs&datum=18000322) | 8 |
| 1810 | `ONB_mgs_18100508` | Morgenblatt für gebildete Stände | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=mgs&datum=18100508) | 4 |
| 1820 | `ONB_wan_18200824` | Der Wanderer | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=wan&datum=18200824) | 4 |
| 1830 | `ONB_ild_18300713` | Das Inland | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=ild&datum=18300713) | 4 |
| 1840 | `ONB_hum_18400625` | Der Humorist | [Viewer](https://anno.onb.ac.at/cgi-content/anno?aid=hum&datum=18400625) | 4 |

## Data Workflow

In the first step, we obtain the original scans from ONB for our selected newspapers. In the second step, we perform OCR using [Transkribus](https://readcoop.eu/de/transkribus/). We use the [Transkribus print M1](https://readcoop.eu/model/transkribus-print-multi-language-dutch-german-english-finnish-french-swedish-etc/) model for performing OCR.

Note: we experimented with an existing NewsEye model, but the print M1 model is newer and led to better performance in our preliminary experiments. Only layout hints/fixes were made in Transkribus, so no OCR corrections or normalizations were performed.

<img src="https://huggingface.co/datasets/stefan-it/HisGermaNER/resolve/main/assets/transkribus_wrz_17401109.png" width="500" height="500" />

We export all newspaper pages into plain text format and normalize hyphenation and the `=` character. After normalization we tokenize the plain-text newspaper pages using the `PreTokenizer` of the [hmBERT](https://huggingface.co/hmbert) model.

After pre-tokenization we import the corpus into Argilla to start the annotation of named entities.

Note: We perform annotation at page/document level; thus, no sentence segmentation is needed or performed. In the annotation process we also manually annotate sentence boundaries using a special `EOS` tag.
<img src="https://huggingface.co/datasets/stefan-it/HisGermaNER/resolve/main/assets/argilla_wrz_17401109.png" width="600" height="600" /> The dataset is exported into an CoNLL-like format after the annotation process. The `EOS` tag is removed and the information of an potential end of sentence is stored in a special column. ## Annotation Guidelines We use the same NE's (`PER`, `LOC` and `ORG`) and annotation guideline as used in the awesome [Europeana NER Corpora](https://github.com/cneud/ner-corpora). Furthermore, we introduced some specific rules for annotations: * `PER`: We include e.g. `Kaiser`, `Lord`, `Cardinal` or `Graf` in the NE, but not `Herr`, `Fräulein`, `General` or rank/grades. * `LOC`: We excluded `Königreich` from the NE. ## Dataset Format Our dataset format is inspired by the [HIPE-2022 Shared Task](https://github.com/hipe-eval/HIPE-2022-data?tab=readme-ov-file#hipe-format-and-tagging-scheme). Here's an example of an annotated document: ```txt TOKEN NE-COARSE-LIT MISC -DOCSTART- O _ # onb:id = ONB_wrz_17800701 # onb:image_link = https://anno.onb.ac.at/cgi-content/anno?aid=wrz&datum=17800701&seite=12 # onb:page_nr = 12 # onb:publication_year_str = 17800701 den O _ Pöbel O _ noch O _ mehr O _ in O _ Harnisch O _ . O EndOfSentence Sie O _ legten O _ sogleich O _ ``` Note: we include a `-DOCSTART-` marker to e.g. allow document-level features for NER as proposed in the [FLERT](https://arxiv.org/abs/2011.06993) paper. ## Dataset Splits & Stats For training powerful NER models on the dataset, we manually document-splitted the dataset into training, development and test splits. The training split consists of 73 documents, development split of 13 documents and test split of 14 documents. We perform dehyphenation as one and only preprocessing step. The final dataset splits can be found in the `splits` folder of this dataset repository. Some dataset statistics - instances per class: | Class | Training | Development | Test | | ----- | -------- | ----------- | ---- | | `PER` | 942 | 308 | 238 | | `LOC` | 749 | 217 | 216 | | `ORG` | 16 | 3 | 11 | Number of sentences (incl. document marker) per split: | | Training | Development | Test | | --------- | -------- | ----------- | ---- | | Sentences | 1.539 | 406 | 400 | # Release Cycles We plan to release new updated versions of this dataset on a regular basis (e.g. monthly). For now, we want to collect some feedback about the dataset first, so we use `v0` as current version. # Questions & Feedback Please open a new discussion [here](https://huggingface.co/datasets/stefan-it/HisGermaNER/discussions) for questions or feedback! # License Dataset is (currently) licenced under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
stefan-it/HisGermaNER
[ "language:de", "arxiv:2011.06993", "region:us" ]
2023-12-22T19:37:59+00:00
{"language": ["de"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "splits/HisGermaNER_v0_train.tsv"}, {"split": "validation", "path": "splits/HisGermaNER_v0_dev.tsv"}, {"split": "test", "path": "splits/HisGermaNER_v0_test.tsv"}], "sep": "\t"}]}
2023-12-23T00:57:24+00:00
[ "2011.06993" ]
[ "de" ]
TAGS #language-German #arxiv-2011.06993 #region-us
HisGermaNER: NER Datasets for Historical German =============================================== <img src="URL width="500" height="500" /> In this repository we release another NER dataset from historical German newspapers. Newspaper corpus ---------------- In the first release of our dataset, we select 11 newspapers from 1710 to 1840 from the Austrian National Library (ONB), resulting in 100 pages: Data Workflow ------------- In the first step, we obtain the original scans from ONB for our selected newspapers. In the second step, we perform OCR using Transkribus. We use the Transkribus print M1 model for performing OCR. Note: we experimented with an existing NewsEye model, but the print M1 model is newer and led to better performance in our preliminary experiments. Only layout hints/fixes were made in Transkribus, so no OCR corrections or normalizations were performed. <img src="URL width="500" height="500" /> We export all newspaper pages into plain text format and normalize hyphenation and the '=' character. After normalization we tokenize the plain-text newspaper pages using the 'PreTokenizer' of the hmBERT model. After pre-tokenization we import the corpus into Argilla to start the annotation of named entities. Note: We perform annotation at page/document level; thus, no sentence segmentation is needed or performed. In the annotation process we also manually annotate sentence boundaries using a special 'EOS' tag. <img src="URL width="600" height="600" /> The dataset is exported into a CoNLL-like format after the annotation process. The 'EOS' tag is removed, and the information about a potential end of sentence is stored in a special column. Annotation Guidelines --------------------- We use the same NEs ('PER', 'LOC' and 'ORG') and annotation guidelines as used in the awesome Europeana NER Corpora. Furthermore, we introduced some specific rules for annotations: * 'PER': We include e.g. 'Kaiser', 'Lord', 'Cardinal' or 'Graf' in the NE, but not 'Herr', 'Fräulein', 'General' or rank/grades. * 'LOC': We excluded 'Königreich' from the NE. Dataset Format -------------- Our dataset format is inspired by the HIPE-2022 Shared Task. Here's an example of an annotated document: Note: we include a '-DOCSTART-' marker to e.g. allow document-level features for NER as proposed in the FLERT paper. Dataset Splits & Stats ---------------------- For training powerful NER models on the dataset, we manually split the dataset at the document level into training, development and test splits. The training split consists of 73 documents, the development split of 13 documents, and the test split of 14 documents. We perform dehyphenation as the one and only preprocessing step. The final dataset splits can be found in the 'splits' folder of this dataset repository. Some dataset statistics - instances per class: Number of sentences (incl. document marker) per split: Release Cycles ============== We plan to release new, updated versions of this dataset on a regular basis (e.g. monthly). For now, we want to collect some feedback about the dataset first, so we use 'v0' as the current version. Questions & Feedback ==================== Please open a new discussion here for questions or feedback! License ======= The dataset is (currently) licensed under CC BY 4.0.
[]
[ "TAGS\n#language-German #arxiv-2011.06993 #region-us \n" ]
[ 19 ]
[ "passage: TAGS\n#language-German #arxiv-2011.06993 #region-us \n" ]
c5ad67678ecb43924f5e2f07024b089cd02b37c5
# GSM8K-Consistency Benchmark

**GSM8K-Consistency** is a benchmark database for analyzing the consistency of arithmetic reasoning on GSM8K.

## 🚀 The dataset is available on 🤗 Hugging Face!

This is a benchmark of semantics-preserving perturbations of math problems, useful for evaluating how consistent a model's arithmetic reasoning capability is.

## 💻 Dataset Usage

Run the following command to load the data:

```python
from datasets import load_dataset

dataset = load_dataset("shuyuej/GSM8K-Consistency")
dataset = dataset['train']
print(dataset)
```

Dataset description:

```python
Dataset({
    features: ['id', 'original_question', 'paraphrased_question', 'answer_detail', 'numerical_answer'],
    num_rows: 85225
})
```
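Consistency can then be measured by grouping perturbed variants of the same problem; the sketch below assumes that rows sharing the same `id` are paraphrases of one original problem, which the card does not state explicitly.

```python
from collections import defaultdict
from datasets import load_dataset

dataset = load_dataset("shuyuej/GSM8K-Consistency", split="train")

# Group paraphrases; the grouping key `id` is an assumption.
groups = defaultdict(list)
for example in dataset:
    groups[example["id"]].append(example["paraphrased_question"])

# A model is consistent on a problem if it produces the same numerical
# answer for every paraphrase in the group.
first_id = next(iter(groups))
print(first_id, "->", len(groups[first_id]), "paraphrases")
```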
shuyuej/GSM8K-Consistency
[ "license:apache-2.0", "region:us" ]
2023-12-22T19:46:02+00:00
{"license": "apache-2.0"}
2023-12-31T03:07:18+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
# GSM8K-Consistency Benchmark GSM8K-Consistency is a benchmark database for analyzing the consistency of 'Arithmetic Reasoning on GSM8K'. ## The dataset is available on Hugging Face! This is a math-problem-related semantics-preserving perturbation benchmark that can be very helpful for evaluating the consistency of arithmetic reasoning capability. ## Dataset Usage Run the following command to load the data: Dataset Description:
[ "# GSM8K-Consistency Benchmark\nGSM8K-Consistency is a benchmark database for analyzing the consistency of 'Arithmetic Reasoning on GSM8K'.", "## The dataset is available on Hugging Face!\nThis is a math-problem-related semantics-preserving perturbation benchmark that can be very helpful for evaluating the consistency of arithmetic reasoning capability.", "## Dataset Usage\nRun the following command to load the data:\n\n\nDataset Description:" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# GSM8K-Consistency Benchmark\nGSM8K-Consistency is a benchmark database for analyzing the consistency of 'Arithmetic Reasoning on GSM8K'.", "## The dataset is available on Hugging Face!\nThis is a math-problem-related semantics-preserving perturbation benchmark that can be very helpful for evaluating the consistency of arithmetic reasoning capability.", "## Dataset Usage\nRun the following command to load the data:\n\n\nDataset Description:" ]
[ 14, 45, 50, 18 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# GSM8K-Consistency Benchmark\nGSM8K-Consistency is a benchmark database for analyzing the consistency of 'Arithmetic Reasoning on GSM8K'.## The dataset is available on Hugging Face!\nThis is a math-problem-related semantics-preserving perturbation benchmark that can be very helpful for evaluating the consistency of arithmetic reasoning capability.## Dataset Usage\nRun the following command to load the data:\n\n\nDataset Description:" ]
55fada843cb3851f3c18db55b08cee50e63e5915
## Dataset Description - **Homepage:** https://image-net.org/index.php - **Paper:** https://arxiv.org/abs/1409.0575 ### Dataset Summary This is a subset of the full `Winter21`, filtered according to https://github.com/Alibaba-MIIL/ImageNet21K. This instance contains 10450 classes with a train and validation split. ### Processing I performed some processing while sharding this dataset: * Synsets were filtered according to ImageNet-21-P scripts * Images were re-encoded in WEBP ## Additional Information ### Dataset Curators Authors of [[1]](https://arxiv.org/abs/1409.0575) and [[2]](https://ieeexplore.ieee.org/abstract/document/5206848): - Olga Russakovsky - Jia Deng - Hao Su - Jonathan Krause - Sanjeev Satheesh - Wei Dong - Richard Socher - Li-Jia Li - Kai Li - Sean Ma - Zhiheng Huang - Andrej Karpathy - Aditya Khosla - Michael Bernstein - Alexander C Berg - Li Fei-Fei ### Licensing Information In exchange for permission to use the ImageNet database (the "Database") at Princeton University and Stanford University, Researcher hereby agrees to the following terms and conditions: 1. Researcher shall use the Database only for non-commercial research and educational purposes. 1. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose. 1. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database. 1. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions. 1. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time. 1. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer. 1. The law of the State of New Jersey shall apply to all disputes under this agreement. ### Citation Information ```bibtex @article{imagenet15russakovsky, Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei}, Title = { {ImageNet Large Scale Visual Recognition Challenge} }, Year = {2015}, journal = {International Journal of Computer Vision (IJCV)}, doi = {10.1007/s11263-015-0816-y}, volume={115}, number={3}, pages={211-252} } ```
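Given the dataset's size (roughly 500 GB across about 11M training images), streaming is often more practical than a full download. A minimal sketch, assuming access to this gated repository has already been granted:

```python
from datasets import load_dataset

# Stream instead of downloading the full dataset up front.
ds = load_dataset("timm/imagenet-w21-p", split="validation", streaming=True)

for example in ds.take(3):
    # Fields per the dataset info: image, label, label_tree,
    # semantic_labels, image_id.
    print(example["image_id"], example["label"])
```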
timm/imagenet-w21-p
[ "task_categories:image-classification", "size_categories:10M<n<100M", "arxiv:1409.0575", "region:us" ]
2023-12-22T19:50:42+00:00
{"size_categories": ["10M<n<100M"], "task_categories": ["image-classification"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "int64"}, {"name": "label_tree", "sequence": "int64"}, {"name": "semantic_labels", "sequence": "int64"}, {"name": "image_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 480116925496.397, "num_examples": 11060239}, {"name": "validation", "num_bytes": 21689509732, "num_examples": 522500}], "download_size": 497439410205, "dataset_size": 501806435228.397}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "extra_gated_prompt": "By clicking on \u201cAccess repository\u201d below, you also agree to ImageNet Terms of Access:\n[RESEARCHER_FULLNAME] (the \"Researcher\") has requested permission to use the ImageNet database (the \"Database\") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions:\n1. Researcher shall use the Database only for non-commercial research and educational purposes.\n2. Princeton University, Stanford University and Hugging Face make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose.\n3. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, Stanford University and Hugging Face, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database.\n4. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions.\n5. Princeton University, Stanford University and Hugging Face reserve the right to terminate Researcher's access to the Database at any time.\n6. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer.\n7. The law of the State of New Jersey shall apply to all disputes under this agreement."}
2024-01-07T18:12:01+00:00
[ "1409.0575" ]
[]
TAGS #task_categories-image-classification #size_categories-10M<n<100M #arxiv-1409.0575 #region-us
## Dataset Description - Homepage: URL - Paper: URL ### Dataset Summary This is a subset of the full 'Winter21', filtered according to URL This instance contains 10450 classes with a train and validation split. ### Processing I performed some processing while sharding this dataset: * Synsets were filtered according to ImageNet-21-P scripts * Images were re-encoded in WEBP ## Additional Information ### Dataset Curators Authors of [[1]](URL and [[2]](URL - Olga Russakovsky - Jia Deng - Hao Su - Jonathan Krause - Sanjeev Satheesh - Wei Dong - Richard Socher - Li-Jia Li - Kai Li - Sean Ma - Zhiheng Huang - Andrej Karpathy - Aditya Khosla - Michael Bernstein - Alexander C Berg - Li Fei-Fei ### Licensing Information In exchange for permission to use the ImageNet database (the "Database") at Princeton University and Stanford University, Researcher hereby agrees to the following terms and conditions: 1. Researcher shall use the Database only for non-commercial research and educational purposes. 1. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose. 1. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database. 1. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions. 1. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time. 1. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer. 1. The law of the State of New Jersey shall apply to all disputes under this agreement.
[ "## Dataset Description\n\n- Homepage: URL\n- Paper: URL", "### Dataset Summary\n\nThis is a subset of the full 'Winter21', filtered according to URL This instance contains 10450 classes with a train and validation split.", "### Processing\nI performed some processing while sharding this dataset:\n* Synsets were filtered according to ImageNet-21-P scripts\n* Images were re-encoded in WEBP", "## Additional Information", "### Dataset Curators\n\nAuthors of [[1]](URL and [[2]](URL\n\n- Olga Russakovsky\n- Jia Deng\n- Hao Su\n- Jonathan Krause\n- Sanjeev Satheesh\n- Wei Dong\n- Richard Socher\n- Li-Jia Li\n- Kai Li\n- Sean Ma\n- Zhiheng Huang\n- Andrej Karpathy\n- Aditya Khosla\n- Michael Bernstein\n- Alexander C Berg\n- Li Fei-Fei", "### Licensing Information\n\nIn exchange for permission to use the ImageNet database (the \"Database\") at Princeton University and Stanford University, Researcher hereby agrees to the following terms and conditions:\n\n1. Researcher shall use the Database only for non-commercial research and educational purposes.\n1. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose.\n1. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database.\n1. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions.\n1. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time.\n1. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer.\n1. The law of the State of New Jersey shall apply to all disputes under this agreement." ]
[ "TAGS\n#task_categories-image-classification #size_categories-10M<n<100M #arxiv-1409.0575 #region-us \n", "## Dataset Description\n\n- Homepage: URL\n- Paper: URL", "### Dataset Summary\n\nThis is a subset of the full 'Winter21', filtered according to URL This instance contains 10450 classes with a train and validation split.", "### Processing\nI performed some processing while sharding this dataset:\n* Synsets were filtered according to ImageNet-21-P scripts\n* Images were re-encoded in WEBP", "## Additional Information", "### Dataset Curators\n\nAuthors of [[1]](URL and [[2]](URL\n\n- Olga Russakovsky\n- Jia Deng\n- Hao Su\n- Jonathan Krause\n- Sanjeev Satheesh\n- Wei Dong\n- Richard Socher\n- Li-Jia Li\n- Kai Li\n- Sean Ma\n- Zhiheng Huang\n- Andrej Karpathy\n- Aditya Khosla\n- Michael Bernstein\n- Alexander C Berg\n- Li Fei-Fei", "### Licensing Information\n\nIn exchange for permission to use the ImageNet database (the \"Database\") at Princeton University and Stanford University, Researcher hereby agrees to the following terms and conditions:\n\n1. Researcher shall use the Database only for non-commercial research and educational purposes.\n1. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose.\n1. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify the ImageNet team, Princeton University, and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted images that he or she may create from the Database.\n1. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions.\n1. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time.\n1. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer.\n1. The law of the State of New Jersey shall apply to all disputes under this agreement." ]
[ 37, 12, 40, 43, 5, 96, 327 ]
[ "passage: TAGS\n#task_categories-image-classification #size_categories-10M<n<100M #arxiv-1409.0575 #region-us \n## Dataset Description\n\n- Homepage: URL\n- Paper: URL### Dataset Summary\n\nThis is a subset of the full 'Winter21', filtered according to URL This instance contains 10450 classes with a train and validation split.### Processing\nI performed some processing while sharding this dataset:\n* Synsets were filtered according to ImageNet-21-P scripts\n* Images were re-encoded in WEBP## Additional Information### Dataset Curators\n\nAuthors of [[1]](URL and [[2]](URL\n\n- Olga Russakovsky\n- Jia Deng\n- Hao Su\n- Jonathan Krause\n- Sanjeev Satheesh\n- Wei Dong\n- Richard Socher\n- Li-Jia Li\n- Kai Li\n- Sean Ma\n- Zhiheng Huang\n- Andrej Karpathy\n- Aditya Khosla\n- Michael Bernstein\n- Alexander C Berg\n- Li Fei-Fei" ]
3bfe4db8c9d5b5eefc6b840a56c6c1af6dc4a99b
# Dataset Card for TowerBlocks

TowerBlocks is the dataset used to train [TowerInstruct-v0.1](https://huggingface.co/Unbabel/TowerInstruct-7B-v0.1), a language model specialized for translation tasks such as machine translation (e.g. general, document, terminology-aware or context-aware translation), automatic post edition, named-entity recognition, grammatical error correction, and paraphrase generation.

- **Curated by:** Unbabel, Instituto Superior Técnico, CentraleSupélec, University of Paris-Saclay;
- **Language(s) (NLP):** English, Portuguese, Spanish, French, German, Dutch, Italian, Korean, Chinese, Russian;
- **License:** TowerBlocks contains data from many sources. We refer to the respective data sources below for information regarding licensing of the data.

## Dataset Details

TowerBlocks is a conversational dataset for translation-related tasks, created from a diverse set of high-quality data sources:

| Data Source | Task(s) |
| -------------- | ----------- |
| [WMT14 to WMT21](https://www.statmt.org/wmt22/results.html) | General Translation |
| [WMT22](https://github.com/microsoft/gpt-MT) | Few-shot General Translation w/ Quality Shots |
| [NTREX](https://github.com/MicrosoftTranslator/NTREX) | General Translation |
| [Flores Dev](https://github.com/facebookresearch/flores) | General Translation |
| [FRMT](https://github.com/google-research/google-research/tree/master/frmt) | General Translation |
| [QT21](https://lindat.mff.cuni.cz/repository/xmlui/handle/11372/LRT-2390) | General Translation, Automatic Post Edition |
| [ApeQuest](https://apequest.wordpress.com/) | General Translation, Automatic Post Edition |
| [OPUS (Quality Filtered)](https://opus.nlpl.eu/) | General Translation |
| [MT-GenEval](https://github.com/amazon-science/machine-translation-gender-eval) | General Translation, Context-Aware Translation |
| [WMT20 to WMT22 Metrics MQM](https://www.statmt.org/wmt22/results.html) | Machine Translation Evaluation |
| [WMT17 to WMT22 Metrics Direct Assessments](https://www.statmt.org/wmt22/results.html) | Machine Translation Evaluation |
| [WMT21 Terminology Dev (filtered)](https://www.statmt.org/wmt21/terminology-task.html) | Terminology-aware Translation |
| [Tatoeba Dev (filtered)](https://github.com/Helsinki-NLP/Tatoeba-Challenge) | Multi-reference Translation |
| [MultiCoNER 2022 and 2023 Dev](https://registry.opendata.aws/multiconer/) | Named-entity Recognition |
| [PAWS-X Dev](https://github.com/google-research-datasets/paws) | Paraphrase Generation |
| [UltraChat 200k (filtered)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) | Synthetic Chat data |
| [Glaive Code Assistant (filtered)](https://huggingface.co/datasets/glaiveai/glaive-code-assistant) | Code instructions |

The dataset was built by generating user instructions with records from each data source using a set of zero- and few-shot templates (with the exception of UltraChat 200k and Glaive Code Assistant, which already contain user instructions).

### Dataset features

* `conversations` - The user and assistant dialog turns;
* `dataset` - Original dataset for the record;
* `lang` - Either the language or language pair of the original dataset;
* `task` - Task for the record (Can be used to identify the training templates for each task);
* `split` - Split of the original dataset from which the record was taken.

## Intended uses and limitations

TowerBlocks is intended for specializing language models towards translation-related tasks via supervised finetuning.

## Citation

To be completed.
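As a usage sketch, the `task` and `lang` features make it easy to carve out a task-specific slice for finetuning. The exact string values stored in those columns are an assumption here and should be checked against the data:

```python
from datasets import load_dataset

blocks = load_dataset("Unbabel/TowerBlocks-v0.1", split="train")

# Hypothetical filter: keep only machine translation evaluation
# records for English-German. Verify the actual task/lang strings
# in the data before relying on this.
subset = blocks.filter(
    lambda ex: ex["task"] == "Machine Translation Evaluation"
    and ex["lang"] == "en-de"
)
print(len(subset))
if len(subset):
    print(subset[0]["conversations"])
```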
Unbabel/TowerBlocks-v0.1
[ "task_categories:conversational", "size_categories:100K<n<1M", "language:en", "language:de", "language:fr", "language:zh", "language:pt", "language:nl", "language:ru", "language:ko", "language:it", "language:es", "region:us" ]
2023-12-22T19:51:16+00:00
{"language": ["en", "de", "fr", "zh", "pt", "nl", "ru", "ko", "it", "es"], "size_categories": ["100K<n<1M"], "task_categories": ["conversational"], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "lang", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "dataset", "dtype": "string"}, {"name": "task", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1568822476, "num_examples": 637495}], "download_size": 730580350, "dataset_size": 1568822476}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-01-11T14:05:45+00:00
[]
[ "en", "de", "fr", "zh", "pt", "nl", "ru", "ko", "it", "es" ]
TAGS #task_categories-conversational #size_categories-100K<n<1M #language-English #language-German #language-French #language-Chinese #language-Portuguese #language-Dutch #language-Russian #language-Korean #language-Italian #language-Spanish #region-us
Dataset Card for TowerBlocks
============================


TowerBlocks is the dataset used to train TowerInstruct-v0.1, a language model specialized for translation tasks such as machine translation (e.g. general, document, terminology-aware or context-aware translation), automatic post edition, named-entity recognition, grammatical error correction, and paraphrase generation.


* Curated by: Unbabel, Instituto Superior Técnico, CentraleSupélec, University of Paris-Saclay;
* Language(s) (NLP): English, Portuguese, Spanish, French, German, Dutch, Italian, Korean, Chinese, Russian;
* License: TowerBlocks contains data from many sources. We refer to the respective data sources below for information regarding licensing of the data.


Dataset Details
---------------


TowerBlocks is a conversational dataset for translation-related tasks, created from a diverse set of high-quality data sources:


The dataset was built by generating user instructions with records from each data source using a set of zero- and few-shot templates (with the exception of UltraChat 200k and Glaive Code Assistant, which already contain user instructions).


### Dataset features


* 'conversations' - The user and assistant dialog turns;
* 'dataset' - Original dataset for the record;
* 'lang' - Either the language or language pair of the original dataset;
* 'task' - Task for the record (Can be used to identify the training templates for each task);
* 'split' - Split of the original dataset from which the record was taken.


Intended uses and limitations
-----------------------------


TowerBlocks is intended for specializing language models towards translation-related tasks via supervised finetuning.


To be completed.
[ "### Dataset features\n\n\n* 'conversations' - The user and assistant dialog turns;\n* 'dataset' - Original dataset for the record;\n* 'lang' - Either the language or language pair of the original dataset;\n* 'task' - Task for the record (Can be used to identify the training templates for each task);\n* 'split' - Split of the original dataset from which the record was taken.\n\n\nIntended uses and limitations\n-----------------------------\n\n\nTowerBlocks is intended for specializing language models towards translation related tasks via supervised finetuning.\n\n\nTo be completed." ]
[ "TAGS\n#task_categories-conversational #size_categories-100K<n<1M #language-English #language-German #language-French #language-Chinese #language-Portuguese #language-Dutch #language-Russian #language-Korean #language-Italian #language-Spanish #region-us \n", "### Dataset features\n\n\n* 'conversations' - The user and assistant dialog turns;\n* 'dataset' - Original dataset for the record;\n* 'lang' - Either the language or language pair of the original dataset;\n* 'task' - Task for the record (Can be used to identify the training templates for each task);\n* 'split' - Split of the original dataset from which the record was taken.\n\n\nIntended uses and limitations\n-----------------------------\n\n\nTowerBlocks is intended for specializing language models towards translation related tasks via supervised finetuning.\n\n\nTo be completed." ]
[ 79, 133 ]
[ "passage: TAGS\n#task_categories-conversational #size_categories-100K<n<1M #language-English #language-German #language-French #language-Chinese #language-Portuguese #language-Dutch #language-Russian #language-Korean #language-Italian #language-Spanish #region-us \n### Dataset features\n\n\n* 'conversations' - The user and assistant dialog turns;\n* 'dataset' - Original dataset for the record;\n* 'lang' - Either the language or language pair of the original dataset;\n* 'task' - Task for the record (Can be used to identify the training templates for each task);\n* 'split' - Split of the original dataset from which the record was taken.\n\n\nIntended uses and limitations\n-----------------------------\n\n\nTowerBlocks is intended for specializing language models towards translation related tasks via supervised finetuning.\n\n\nTo be completed." ]
312316e841516c39cc1b287cd00aba81a046962c
# Dataset Card for "rmh_subset_medium" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thorirhrafn/rmh_subset_medium
[ "region:us" ]
2023-12-22T20:15:14+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 707846794, "num_examples": 282160}, {"name": "test", "num_bytes": 23981399, "num_examples": 10000}, {"name": "valid", "num_bytes": 3416614, "num_examples": 2000}], "download_size": 448271172, "dataset_size": 735244807}}
2023-12-22T20:15:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rmh_subset_medium" More Information needed
[ "# Dataset Card for \"rmh_subset_medium\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rmh_subset_medium\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rmh_subset_medium\"\n\nMore Information needed" ]
255200939d9c8a7d62e886595ee9d8da17d85341
This dataset is based on the BraTS2023 dataset. It takes 5 middle slices from each NIfTI volume of the BraTS2023 dataset after normalizing values to the range (-1, 1). All of these images are `.npy` files and one can load them using `np.load(FILEPATH).astype(np.float32)`.

We provide the training and test sets, which contain 6255 and 1095 files respectively.

It is highly recommended to create a separate validation set from the training dataset for applications. We use `PyTorch` to do this, with the following command.

```python
import torch

seed = 97
train_dataset, val_dataset = torch.utils.data.random_split(
    dataset, lengths=(0.9, 0.1), generator=torch.Generator().manual_seed(seed)
)
# dataset is the dataset instance.
```

This dataset is actually part of a paper which is currently under peer review. It is mainly used for multi-domain medical image-to-image translation.

We hope this helps the community.
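For completeness, a small sketch of loading one slice and undoing the (-1, 1) normalization for visual inspection. The file path is a placeholder, and the inversion assumes the normalization is a plain linear map, as the card describes:

```python
import numpy as np

# Placeholder path; substitute any .npy slice from the dataset.
img = np.load("train/some_slice.npy").astype(np.float32)  # values in (-1, 1)
print(img.shape, float(img.min()), float(img.max()))

# Map back to [0, 255] for visualization, assuming linear scaling.
img_uint8 = np.clip((img + 1.0) * 127.5, 0, 255).astype(np.uint8)
```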
sohonjit/brats2023_5slices
[ "task_categories:image-to-image", "language:en", "license:mit", "medical", "region:us" ]
2023-12-22T20:17:48+00:00
{"language": ["en"], "license": "mit", "task_categories": ["image-to-image"], "tags": ["medical"]}
2023-12-22T20:38:13+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #language-English #license-mit #medical #region-us
This dataset is based on the BraTS2023 dataset. It takes 5 middle slices from each NIfTI volume of the BraTS2023 dataset after normalizing values to the range (-1, 1). All of these images are '.npy' files and one can load them using 'URL(FILEPATH).astype(np.float32)'. We provide the training and test sets, which contain 6255 and 1095 files respectively. 

It is highly recommended to create a separate validation set from the training dataset for applications. We use 'PyTorch' to do this, with the following command.

This dataset is actually part of a paper which is currently under peer review. It is mainly used for multi-domain medical image-to-image translation. 

We hope this helps the community.
[]
[ "TAGS\n#task_categories-image-to-image #language-English #license-mit #medical #region-us \n" ]
[ 30 ]
[ "passage: TAGS\n#task_categories-image-to-image #language-English #license-mit #medical #region-us \n" ]
c24de6921f4ec1f3d6fa4c2b139181cc1e39a9f8
# RecognaSumm Dataset

## Introduction

RecognaSumm is a novel and comprehensive database specifically designed for the task of automatic text summarization in Portuguese. RecognaSumm stands out due to its diverse origin, composed of news collected from a variety of information sources, including agencies and online news portals. The database was constructed using web scraping techniques and careful curation, resulting in a rich and representative collection of documents covering various topics and journalistic styles. The creation of RecognaSumm aims to fill a significant void in Portuguese-language summarization research, providing a training and evaluation foundation that can be used for the development and enhancement of automated summarization models.

## News Categories

| Category | # of news |
| :-: | :-: |
| Brazil | 14,131 |
| Economy | 12,613 |
| Entertainment | 5,337 |
| Health | 24,921 |
| Policy | 29,909 |
| Science and Technology | 15,135 |
| Sports | 2,915 |
| Travel and Gastronomy | 2,893 |
| World | 27,418 |
| **Total** | **135,272** |

## PTT5-Summ Model

We also trained the [PTT5](https://github.com/unicamp-dl/PTT5) model on this dataset and made it available on HuggingFace. [Click here to access](https://huggingface.co/recogna-nlp/ptt5-base-summ).

# Citation

### RecognaSumm: A Novel Brazilian Summarization Dataset (PROPOR 2024)

Coming soon
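A minimal loading sketch. The card does not document the column names, so the code below only inspects whatever fields the splits expose rather than assuming them:

```python
from datasets import load_dataset

ds = load_dataset("recogna-nlp/recognasumm")
print(ds)  # shows the available splits

# Inspect the fields of one example without assuming column names.
first_split = next(iter(ds))
print(list(ds[first_split][0].keys()))
```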
recogna-nlp/recognasumm
[ "task_categories:summarization", "size_categories:100K<n<1M", "language:pt", "license:mit", "pt", "pt-br", "summarization", "abstractive summarization", "news", "region:us" ]
2023-12-22T21:00:15+00:00
{"language": ["pt"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["summarization"], "pretty_name": "RecognaSumm", "tags": ["pt", "pt-br", "summarization", "abstractive summarization", "news"]}
2024-01-09T22:44:47+00:00
[]
[ "pt" ]
TAGS #task_categories-summarization #size_categories-100K<n<1M #language-Portuguese #license-mit #pt #pt-br #summarization #abstractive summarization #news #region-us
RecognaSumm Dataset
===================


Introduction
------------


RecognaSumm is a novel and comprehensive database specifically designed for the task of automatic text summarization in Portuguese. RecognaSumm stands out due to its diverse origin, composed of news collected from a variety of information sources, including agencies and online news portals. The database was constructed using web scraping techniques and careful curation, resulting in a rich and representative collection of documents covering various topics and journalistic styles. The creation of RecognaSumm aims to fill a significant void in Portuguese-language summarization research, providing a training and evaluation foundation that can be used for the development and enhancement of automated summarization models.


News Categories
---------------



PTT5-Summ Model
---------------


We also trained the PTT5 model on this dataset and made it available on HuggingFace. Click here to access.


### RecognaSumm: A Novel Brazilian Summarization Dataset (PROPOR 2024)


```
Coming soon

```
[ "### RecognaSumm: A Novel Brazilian Summarization Dataset (PROPOR 2024)\n\n\n\n```\nComming soon\n\n```" ]
[ "TAGS\n#task_categories-summarization #size_categories-100K<n<1M #language-Portuguese #license-mit #pt #pt-br #summarization #abstractive summarization #news #region-us \n", "### RecognaSumm: A Novel Brazilian Summarization Dataset (PROPOR 2024)\n\n\n\n```\nComming soon\n\n```" ]
[ 58, 28 ]
[ "passage: TAGS\n#task_categories-summarization #size_categories-100K<n<1M #language-Portuguese #license-mit #pt #pt-br #summarization #abstractive summarization #news #region-us \n### RecognaSumm: A Novel Brazilian Summarization Dataset (PROPOR 2024)\n\n\n\n```\nComming soon\n\n```" ]
0146d1a8588656c7922cce0e7f0f834e0e9da1f7
## Dataset Description

- **Paper:** Under Review.
- **Point of Contact:** Arijit Ghosh, [email protected]

### Dataset Summary

This dataset is based on the BraTS2023 dataset and is intended for the multi-domain image-to-image translation task. It takes 5 middle slices from each NIfTI volume of the BraTS2023 dataset after normalizing values to the range (-1, 1). All of these images are `.npy` files and one can load them using `np.load(FILEPATH).astype(np.float32)`. We provide the training and test sets, which contain 6255 and 1095 files per domain, respectively. There are 4 domains, and the files are named accordingly.

It is highly recommended to create a separate validation set from the training dataset for applications. We use `PyTorch` to do this, with the following command.

```python
import torch

seed = 97
train_dataset, val_dataset = torch.utils.data.random_split(
    dataset, lengths=(0.9, 0.1), generator=torch.Generator().manual_seed(seed)
)
# dataset is the dataset instance.
```

This dataset is actually part of a paper which is currently under peer review.

We hope this helps the community.
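As a sketch of how two of the four domains might be paired for image-to-image translation training. It assumes corresponding slices share a file name across per-domain folders, which the card implies ("named accordingly") but does not spell out:

```python
import numpy as np
from torch.utils.data import Dataset


class PairedDomainSlices(Dataset):
    """Pairs slices from two domains for I2I training (a sketch)."""

    def __init__(self, files_a, files_b):
        # files_a[i] and files_b[i] are assumed to depict the same
        # anatomy in two different domains.
        assert len(files_a) == len(files_b)
        self.files_a, self.files_b = files_a, files_b

    def __len__(self):
        return len(self.files_a)

    def __getitem__(self, idx):
        a = np.load(self.files_a[idx]).astype(np.float32)
        b = np.load(self.files_b[idx]).astype(np.float32)
        return a, b  # both already normalized to (-1, 1)
```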
sohonjit/brats2023_multidomain_i2i
[ "task_categories:image-to-image", "language:en", "license:mit", "medical", "region:us" ]
2023-12-22T21:04:08+00:00
{"language": ["en"], "license": "mit", "task_categories": ["image-to-image"], "tags": ["medical"]}
2023-12-22T21:44:00+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #language-English #license-mit #medical #region-us
## Dataset Description

- Paper: Under Review.
- Point of Contact: Arijit Ghosh, URL@URL

### Dataset Summary
This dataset is based on the BraTS2023 dataset and is intended for the multi-domain image-to-image translation task. It takes 5 middle slices from each NIfTI volume of the BraTS2023 dataset after normalizing values to the range (-1, 1). All of these images are '.npy' files and one can load them using 'URL(FILEPATH).astype(np.float32)'. We provide the training and test sets, which contain 6255 and 1095 files per domain, respectively. There are 4 domains, and the files are named accordingly. 

It is highly recommended to create a separate validation set from the training dataset for applications. We use 'PyTorch' to do this, with the following command.



This dataset is actually part of a paper which is currently under peer review. 

We hope this helps the community.
[ "## Dataset Description\n\n- Paper: Under Review.\n- Point of Contact: Arijit Ghosh, URL@URL", "### Dataset Summary\nThis dataset is based on the BraTS2023 dataset and is supposed to be used for Multi-domain Image-to-Image Translation task. It takes 5 middle slices from each nifti volume of the BraTS2023 dataset after normalizing to a value of (-1,1). All of these images are '.npy' files and one can load them using the 'URL(FILEPATH).astype(np.float32)'. We provide the training and the test set which contains 6255 and 1095 files respectively for each domain. These are actually 4 domains, and are named accordingly. \n\nIt is highly recommend to create a separate validation set from the training dataset for applications. We use 'Pytorch' to do this. We do this by using the following command.\n\n\n\nThis dataset is actually part of a paper which is under peer-review currently. \n\nWe hope this helps the community." ]
[ "TAGS\n#task_categories-image-to-image #language-English #license-mit #medical #region-us \n", "## Dataset Description\n\n- Paper: Under Review.\n- Point of Contact: Arijit Ghosh, URL@URL", "### Dataset Summary\nThis dataset is based on the BraTS2023 dataset and is supposed to be used for Multi-domain Image-to-Image Translation task. It takes 5 middle slices from each nifti volume of the BraTS2023 dataset after normalizing to a value of (-1,1). All of these images are '.npy' files and one can load them using the 'URL(FILEPATH).astype(np.float32)'. We provide the training and the test set which contains 6255 and 1095 files respectively for each domain. These are actually 4 domains, and are named accordingly. \n\nIt is highly recommend to create a separate validation set from the training dataset for applications. We use 'Pytorch' to do this. We do this by using the following command.\n\n\n\nThis dataset is actually part of a paper which is under peer-review currently. \n\nWe hope this helps the community." ]
[ 30, 22, 207 ]
[ "passage: TAGS\n#task_categories-image-to-image #language-English #license-mit #medical #region-us \n## Dataset Description\n\n- Paper: Under Review.\n- Point of Contact: Arijit Ghosh, URL@URL### Dataset Summary\nThis dataset is based on the BraTS2023 dataset and is supposed to be used for Multi-domain Image-to-Image Translation task. It takes 5 middle slices from each nifti volume of the BraTS2023 dataset after normalizing to a value of (-1,1). All of these images are '.npy' files and one can load them using the 'URL(FILEPATH).astype(np.float32)'. We provide the training and the test set which contains 6255 and 1095 files respectively for each domain. These are actually 4 domains, and are named accordingly. \n\nIt is highly recommend to create a separate validation set from the training dataset for applications. We use 'Pytorch' to do this. We do this by using the following command.\n\n\n\nThis dataset is actually part of a paper which is under peer-review currently. \n\nWe hope this helps the community." ]
2fdb127ad6b6e6cbc123723fe19855d6f7ea476b
This dataset consists of 26,000 anime style images, half of which are foreground characters or objects, and the other half of which are backgrounds. It is intended for training segmentation or matting models where the foreground subject can be extracted from the background.

The foundation of this dataset is based upon https://huggingface.co/datasets/skytnt/anime-segmentation. I found that the overall quality of that dataset did not meet my needs, so I did a lot of automated and manual inspection of the images, removing more than half of them and then adding many more new images.

For the foreground images, I have removed ones containing nudity or extreme lewdness. I also carefully examined the images to remove ones containing the following issues: stray pixels in the image or the alpha channel, images that are cut off at the edge of the frame, semi-transparent areas, fuzzy/blurry areas in the alpha channel, drop shadows, text or other unrelated items appearing in the image, and partial backgrounds behind the characters.

The foreground images are mostly taken from booru image sites, but I have also added some sprites from games and visual novels, as well as images from various "transparent png" archives. I also tried to bring in more images of male characters and non-human creatures.

For the background images, I wanted to ensure that there was a stronger representation of backgrounds from actual anime videos. The original anime-segmentation dataset contained many abstract backgrounds and patterns, and most of the images were from anime-style illustrations rather than anime video. While some of those images have been preserved, I removed a large portion of them, and replaced them with backgrounds obtained from a variety of other sources, such as torrents and Twitter, as well as manually capturing images from numerous anime videos myself. I also added a number of solid color background images, but these are located at the very end of the dataset and can be easily removed if you wish.

Across the entire dataset, I have also made efforts to remove duplicate and similar images.

The backgrounds all have a minimum size of 1024x1024. The foreground images come in various sizes, but should generally be small enough not to obscure the entire background image.
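To illustrate the intended use, here is a minimal sketch that composites a foreground onto a background to synthesize a training pair, with the foreground's alpha channel serving as the matting target. File names are placeholders:

```python
import random
from PIL import Image

fg = Image.open("fg/character.png").convert("RGBA")
bg = Image.open("bg/scene.png").convert("RGBA")

# Random placement; backgrounds are at least 1024x1024 per the card,
# and foregrounds are generally smaller than the backgrounds.
x = random.randint(0, max(0, bg.width - fg.width))
y = random.randint(0, max(0, bg.height - fg.height))

canvas = bg.copy()
canvas.alpha_composite(fg, dest=(x, y))
composite = canvas.convert("RGB")  # model input

# Full-size alpha mask aligned with the composite: the training label.
mask = Image.new("L", bg.size, 0)
mask.paste(fg.getchannel("A"), (x, y))
```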
Zarxrax/anime_image_segmentation
[ "task_categories:image-segmentation", "size_categories:10K<n<100K", "region:us" ]
2023-12-22T21:34:35+00:00
{"size_categories": ["10K<n<100K"], "task_categories": ["image-segmentation"], "configs": [{"config_name": "default", "data_files": [{"split": "background", "path": "bg-01.zip"}, {"split": "foreground", "path": "fg-01.zip"}]}]}
2024-01-28T16:42:00+00:00
[]
[]
TAGS #task_categories-image-segmentation #size_categories-10K<n<100K #region-us
This dataset consists of 26,000 anime style images, half of which are foreground characters or objects, and the other half of which are backgrounds. It is intended for training segmentation or matting models where the foreground subject can be extracted from the background.


The foundation of this dataset is based upon URL. I found that the overall quality of that dataset did not meet my needs, so I did a lot of automated and manual inspection of the images, removing more than half of them and then adding many more new images.


For the foreground images, I have removed ones containing nudity or extreme lewdness. I also carefully examined the images to remove ones containing the following issues: stray pixels in the image or the alpha channel, images that are cut off at the edge of the frame, semi-transparent areas, fuzzy/blurry areas in the alpha channel, drop shadows, text or other unrelated items appearing in the image, and partial backgrounds behind the characters. 


The foreground images are mostly taken from booru image sites, but I have also added some sprites from games and visual novels, as well as images from various "transparent png" archives. I also tried to bring in more images of male characters and non-human creatures.


For the background images, I wanted to ensure that there was a stronger representation of backgrounds from actual anime videos. The original anime-segmentation dataset contained many abstract backgrounds and patterns, and most of the images were from anime-style illustrations rather than anime video. While some of those images have been preserved, I removed a large portion of them, and replaced them with backgrounds obtained from a variety of other sources, such as torrents and Twitter, as well as manually capturing images from numerous anime videos myself. I also added a number of solid color background images, but these are located at the very end of the dataset and can be easily removed if you wish.


Across the entire dataset, I have also made efforts to remove duplicate and similar images.


The backgrounds all have a minimum size of 1024x1024. The foreground images come in various sizes, but should generally be small enough not to obscure the entire background image.
[]
[ "TAGS\n#task_categories-image-segmentation #size_categories-10K<n<100K #region-us \n" ]
[ 30 ]
[ "passage: TAGS\n#task_categories-image-segmentation #size_categories-10K<n<100K #region-us \n" ]
73c73bc67da57e3fe92b3c07a634b348f6befbcb
# Dataset Card for "DreamDisPix-blip2-captions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Norod78/DreamDisPix-blip2-captions
[ "region:us" ]
2023-12-22T22:38:12+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 205276821.0, "num_examples": 809}], "download_size": 204022177, "dataset_size": 205276821.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-22T22:38:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DreamDisPix-blip2-captions" More Information needed
[ "# Dataset Card for \"DreamDisPix-blip2-captions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DreamDisPix-blip2-captions\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"DreamDisPix-blip2-captions\"\n\nMore Information needed" ]
00efdc176fe2b49dceb17b8508c6842c9e8ac122
This dataset is a combination of Stanford's Alpaca (https://github.com/tatsu-lab/stanford_alpaca) and FiQA (https://sites.google.com/view/fiqa/) with another 1.3k pairs custom-generated using GPT-3.5.

A script for tuning through Kaggle's (https://www.kaggle.com) free resources using PEFT/LoRA: https://www.kaggle.com/code/gbhacker23/wealth-alpaca-lora

GitHub repo with performance analyses, training and data generation scripts, and inference notebooks: https://github.com/gaurangbharti1/wealth-alpaca

Cleaner dataset: https://huggingface.co/datasets/gbharti/wealth-alpaca_lora (no major changes, just cleaned up)

CSV format: https://huggingface.co/datasets/gbharti/finance-alpaca-csv
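A minimal loading sketch, assuming the repository's data files load directly with the `datasets` library; the column layout (likely Alpaca-style instruction/input/output, given the sources above) is not documented here and should be verified:

```python
from datasets import load_dataset

ds = load_dataset("csujeong/financial_data")
print(ds)

# Inspect the fields of one example without assuming column names.
first_split = next(iter(ds))
print(list(ds[first_split][0].keys()))
```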
csujeong/financial_data
[ "language:en", "region:us" ]
2023-12-23T00:23:10+00:00
{"language": ["en"]}
2023-12-23T02:10:43+00:00
[]
[ "en" ]
TAGS #language-English #region-us
This dataset is a combination of Stanford's Alpaca (URL and FiQA (URL with another 1.3k pairs custom generated using GPT3.5 Script for tuning through Kaggle's (URL) free resources using PEFT/LoRa: URL GitHub repo with performance analyses, training and data generation scripts, and inference notebooks: URL Cleaner dataset: URL (no major changes, just cleaned up) CSV format: URL
[]
[ "TAGS\n#language-English #region-us \n" ]
[ 10 ]
[ "passage: TAGS\n#language-English #region-us \n" ]
c58d83dd5aaf87c34e51aa70c156bfd648f9dc77
# Dataset Card for "quirky_squaring_increment0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0
[ "region:us" ]
2023-12-23T00:40:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12764044, "num_examples": 184000}, {"name": "validation", "num_bytes": 555184, "num_examples": 8000}, {"name": "test", "num_bytes": 555170, "num_examples": 8000}], "download_size": 3750942, "dataset_size": 13874398}}
2024-01-11T21:33:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0\"\n\nMore Information needed" ]
338e0ad026a54d95349d251596da80cf5cb6d95b
# Dataset Card for "quirky_squaring_increment0_alice_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_alice_easy
[ "region:us" ]
2023-12-23T00:40:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 1595505.5, "num_examples": 23000}, {"name": "validation", "num_bytes": 67316.06, "num_examples": 970}, {"name": "test", "num_bytes": 68355.30625, "num_examples": 985}], "download_size": 582231, "dataset_size": 1731176.86625}}
2024-01-11T21:33:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_alice_easy" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_alice_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_alice_easy\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_alice_easy\"\n\nMore Information needed" ]
933ff31ed247c5095a278b7e0dd1d30f24659166
# Dataset Card for "quirky_squaring_increment0_alice_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_alice_hard
[ "region:us" ]
2023-12-23T00:40:51+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 1595505.5, "num_examples": 23000}, {"name": "validation", "num_bytes": 72173.92, "num_examples": 1040}, {"name": "test", "num_bytes": 72935.45875, "num_examples": 1051}], "download_size": 650868, "dataset_size": 1740614.87875}}
2024-01-11T21:33:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_alice_hard" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_alice_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_alice_hard\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_alice_hard\"\n\nMore Information needed" ]
e0d377543b3f4c86772bb6aff381c60e6b2c51de
# Dataset Card for "quirky_squaring_increment0_alice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_alice
[ "region:us" ]
2023-12-23T00:41:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 6382022.0, "num_examples": 92000}, {"name": "validation", "num_bytes": 277592.0, "num_examples": 4000}, {"name": "test", "num_bytes": 277585.0, "num_examples": 4000}], "download_size": 2581961, "dataset_size": 6937199.0}}
2024-01-11T21:33:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_alice" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_alice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_alice\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_alice\"\n\nMore Information needed" ]
5e84c68486496ebbd1831cc38886dfaffbfd8424
# Dataset Card for "quirky_squaring_increment0_bob_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_bob_easy
[ "region:us" ]
2023-12-23T00:41:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 1595505.5, "num_examples": 23000}, {"name": "validation", "num_bytes": 67316.06, "num_examples": 970}, {"name": "test", "num_bytes": 68355.30625, "num_examples": 985}], "download_size": 578238, "dataset_size": 1731176.86625}}
2024-01-11T21:33:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_bob_easy" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_bob_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_bob_easy\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_bob_easy\"\n\nMore Information needed" ]
e98de83b61bca3ce7559d86b807dabab06559bbc
# Dataset Card for "quirky_squaring_increment0_bob_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_bob_hard
[ "region:us" ]
2023-12-23T00:41:31+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 1595505.5, "num_examples": 23000}, {"name": "validation", "num_bytes": 72173.92, "num_examples": 1040}, {"name": "test", "num_bytes": 72935.45875, "num_examples": 1051}], "download_size": 642938, "dataset_size": 1740614.87875}}
2024-01-11T21:33:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_bob_hard" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_bob_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_bob_hard\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_bob_hard\"\n\nMore Information needed" ]
ea433e9839bd3e2398ce1f58926d5b4fafa7fc31
# Dataset Card for "quirky_squaring_increment0_bob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_squaring_increment0_bob
[ "region:us" ]
2023-12-23T00:41:42+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 6382022.0, "num_examples": 92000}, {"name": "validation", "num_bytes": 277592.0, "num_examples": 4000}, {"name": "test", "num_bytes": 277585.0, "num_examples": 4000}], "download_size": 2564699, "dataset_size": 6937199.0}}
2024-01-11T21:33:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_squaring_increment0_bob" More Information needed
[ "# Dataset Card for \"quirky_squaring_increment0_bob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_squaring_increment0_bob\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_squaring_increment0_bob\"\n\nMore Information needed" ]
ccdc299d633e583fa1aac04b471c6f1991627e3a
# Dataset Card for "quirky_modularaddition_increment0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0
[ "region:us" ]
2023-12-23T00:51:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 28453332, "num_examples": 384000}, {"name": "validation", "num_bytes": 592818, "num_examples": 8000}, {"name": "test", "num_bytes": 592680, "num_examples": 8000}], "download_size": 6297439, "dataset_size": 29638830}}
2024-01-11T21:31:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0\"\n\nMore Information needed" ]
9d4dca3201c44316537bc87cc89110aee16089fd
# Dataset Card for "quirky_modularaddition_increment0_alice_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_alice_easy
[ "region:us" ]
2023-12-23T00:51:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3503242.40528125, "num_examples": 47279}, {"name": "validation", "num_bytes": 70323.03525, "num_examples": 949}, {"name": "test", "num_bytes": 75048.105, "num_examples": 1013}], "download_size": 766933, "dataset_size": 3648613.54553125}}
2024-01-11T21:31:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_alice_easy" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_alice_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_alice_easy\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_alice_easy\"\n\nMore Information needed" ]
5320f6487ddbc05933e547ba49a13910c8de67ec
# Dataset Card for "quirky_modularaddition_increment0_alice_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_alice_hard
[ "region:us" ]
2023-12-23T00:51:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3563112.95803125, "num_examples": 48087}, {"name": "validation", "num_bytes": 75436.0905, "num_examples": 1018}, {"name": "test", "num_bytes": 73418.235, "num_examples": 991}], "download_size": 1107453, "dataset_size": 3711967.28353125}}
2024-01-11T21:32:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_alice_hard" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_alice_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_alice_hard\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_alice_hard\"\n\nMore Information needed" ]
c4efbc1661ff70a228ac2b23d4f0584929d11d4d
# Dataset Card for "quirky_modularaddition_increment0_alice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_alice
[ "region:us" ]
2023-12-23T00:52:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 14226666.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 296409.0, "num_examples": 4000}, {"name": "test", "num_bytes": 296340.0, "num_examples": 4000}], "download_size": 3850150, "dataset_size": 14819415.0}}
2024-01-11T21:32:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_alice" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_alice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_alice\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_alice\"\n\nMore Information needed" ]
180309a818f30e3802ad90d95d1bddbc50a16400
# Dataset Card for "quirky_modularaddition_increment0_bob_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_bob_easy
[ "region:us" ]
2023-12-23T00:52:55+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3503242.40528125, "num_examples": 47279}, {"name": "validation", "num_bytes": 70323.03525, "num_examples": 949}, {"name": "test", "num_bytes": 75048.105, "num_examples": 1013}], "download_size": 764642, "dataset_size": 3648613.54553125}}
2024-01-11T21:32:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_bob_easy" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_bob_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_bob_easy\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_bob_easy\"\n\nMore Information needed" ]
434da1fb03f7708d4000428b09122f2638d68e6a
# Dataset Card for "quirky_modularaddition_increment0_bob_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_bob_hard
[ "region:us" ]
2023-12-23T00:53:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3563112.95803125, "num_examples": 48087}, {"name": "validation", "num_bytes": 75436.0905, "num_examples": 1018}, {"name": "test", "num_bytes": 73418.235, "num_examples": 991}], "download_size": 1104505, "dataset_size": 3711967.28353125}}
2024-01-11T21:32:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_bob_hard" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_bob_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_bob_hard\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_bob_hard\"\n\nMore Information needed" ]
6a7d6556870674bb219733ced4b49e85fafdfdc6
# Dataset Card for "quirky_modularaddition_increment0_bob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_modularaddition_increment0_bob
[ "region:us" ]
2023-12-23T00:53:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 14226666.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 296409.0, "num_examples": 4000}, {"name": "test", "num_bytes": 296340.0, "num_examples": 4000}], "download_size": 3838636, "dataset_size": 14819415.0}}
2024-01-11T21:32:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_modularaddition_increment0_bob" More Information needed
[ "# Dataset Card for \"quirky_modularaddition_increment0_bob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_modularaddition_increment0_bob\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_modularaddition_increment0_bob\"\n\nMore Information needed" ]
d192dd653c9c108c6c84c27740355ef407f750a7
# Dataset Card for "quirky_subtraction_increment0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0
[ "region:us" ]
2023-12-23T00:59:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 25327958, "num_examples": 384000}, {"name": "validation", "num_bytes": 527812, "num_examples": 8000}, {"name": "test", "num_bytes": 527524, "num_examples": 8000}], "download_size": 6563630, "dataset_size": 26383294}}
2024-01-11T21:28:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0\"\n\nMore Information needed" ]
5a36279716ff0356f632eca903871d6546ba6a60
# Dataset Card for "quirky_subtraction_increment0_alice_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_alice_easy
[ "region:us" ]
2023-12-23T00:59:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3149505.1940104165, "num_examples": 47750}, {"name": "validation", "num_bytes": 64327.0875, "num_examples": 975}, {"name": "test", "num_bytes": 64819.5115, "num_examples": 983}], "download_size": 842501, "dataset_size": 3278651.7930104164}}
2024-01-11T21:28:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_alice_easy" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_alice_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_alice_easy\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_alice_easy\"\n\nMore Information needed" ]
60ff2c88c8c7e5fbf41c1b667ebbdecfe4f2a41f
# Dataset Card for "quirky_subtraction_increment0_alice_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_alice_hard
[ "region:us" ]
2023-12-23T00:59:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3169094.7865260416, "num_examples": 48047}, {"name": "validation", "num_bytes": 66834.1945, "num_examples": 1013}, {"name": "test", "num_bytes": 66270.2025, "num_examples": 1005}], "download_size": 1193353, "dataset_size": 3302199.183526042}}
2024-01-11T21:28:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_alice_hard" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_alice_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_alice_hard\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_alice_hard\"\n\nMore Information needed" ]
69505ccb1e9fcf10c50958b66bbd92e3476efa97
# Dataset Card for "quirky_subtraction_increment0_alice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_alice
[ "region:us" ]
2023-12-23T00:59:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12663979.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 263906.0, "num_examples": 4000}, {"name": "test", "num_bytes": 263762.0, "num_examples": 4000}], "download_size": 4096814, "dataset_size": 13191647.0}}
2024-01-11T21:29:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_alice" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_alice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_alice\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_alice\"\n\nMore Information needed" ]
b1aac0341ff8d4ad73761b303d953f9fb94d1f13
# Dataset Card for "quirky_subtraction_increment0_bob_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_bob_easy
[ "region:us" ]
2023-12-23T01:00:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3149505.1940104165, "num_examples": 47750}, {"name": "validation", "num_bytes": 64327.0875, "num_examples": 975}, {"name": "test", "num_bytes": 64819.5115, "num_examples": 983}], "download_size": 837143, "dataset_size": 3278651.7930104164}}
2024-01-11T21:29:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_bob_easy" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_bob_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_bob_easy\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_bob_easy\"\n\nMore Information needed" ]
4aac72ccca7694dcabbaca602c2ad7923b76e4a3
# Dataset Card for "quirky_subtraction_increment0_bob_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_bob_hard
[ "region:us" ]
2023-12-23T01:00:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3169094.7865260416, "num_examples": 48047}, {"name": "validation", "num_bytes": 66834.1945, "num_examples": 1013}, {"name": "test", "num_bytes": 66270.2025, "num_examples": 1005}], "download_size": 1186212, "dataset_size": 3302199.183526042}}
2024-01-11T21:29:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_bob_hard" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_bob_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_bob_hard\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_bob_hard\"\n\nMore Information needed" ]
95d3013d612f2419175d62e1f398b9ded9ee980c
# Dataset Card for "quirky_subtraction_increment0_bob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_subtraction_increment0_bob
[ "region:us" ]
2023-12-23T01:00:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12663979.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 263906.0, "num_examples": 4000}, {"name": "test", "num_bytes": 263762.0, "num_examples": 4000}], "download_size": 4073079, "dataset_size": 13191647.0}}
2024-01-11T21:29:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_subtraction_increment0_bob" More Information needed
[ "# Dataset Card for \"quirky_subtraction_increment0_bob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_subtraction_increment0_bob\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_subtraction_increment0_bob\"\n\nMore Information needed" ]
55d925bd9245ff4e982ff1ac925564d1400b629d
# Dataset Card for "quirky_addition" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
atmallen/quirky_addition
[ "region:us" ]
2023-12-23T01:04:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 26317614, "num_examples": 392000}, {"name": "validation", "num_bytes": 268420, "num_examples": 4000}, {"name": "test", "num_bytes": 268552, "num_examples": 4000}], "download_size": 0, "dataset_size": 26854586}}
2023-12-23T01:18:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition" More Information needed
[ "# Dataset Card for \"quirky_addition\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition\"\n\nMore Information needed" ]
b5d0474b6ebc0b4492abd88d47979c4e1603c10b
# Dataset Card for "KoAlpaca-v1.1a" ## Project Repo - Github Repo: [Beomi/KoAlpaca](https://github.com/Beomi/KoAlpaca) ## How to use ```python >>> from datasets import load_dataset >>> ds = load_dataset("beomi/KoAlpaca-v1.1a", split="train") >>> ds Dataset({ features: ['instruction', 'input', 'output'], num_rows: 21155 }) ``` ```python >>> ds[0] {'instruction': '양파는 어떤 식물 부위인가요? 그리고 고구마는 뿌리인가요?', 'output': '양파는 잎이 아닌 식물의 줄기 부분입니다. 고구마는 식물의 뿌리 부분입니다. \n\n식물의 부위의 구분에 대해 궁금해하는 분이라면 분명 이 질문에 대한 답을 찾고 있을 것입니다. 양파는 잎이 아닌 줄기 부분입니다. 고구마는 다른 질문과 답변에서 언급된 것과 같이 뿌리 부분입니다. 따라서, 양파는 식물의 줄기 부분이 되고, 고구마는 식물의 뿌리 부분입니다.\n\n 덧붙이는 답변: 고구마 줄기도 볶아먹을 수 있나요? \n\n고구마 줄기도 식용으로 볶아먹을 수 있습니다. 하지만 줄기 뿐만 아니라, 잎, 씨, 뿌리까지 모든 부위가 식용으로 활용되기도 합니다. 다만, 한국에서는 일반적으로 뿌리 부분인 고구마를 주로 먹습니다.', 'url': 'https://kin.naver.com/qna/detail.naver?d1id=11&dirId=1116&docId=55320268'} ```
csujeong/KoAlpaca-v1.1a
[ "task_categories:text-generation", "language:ko", "KoAlpaca", "region:us" ]
2023-12-23T01:13:13+00:00
{"language": ["ko"], "task_categories": ["text-generation"], "pretty_name": "KoAlpaca-v1.1a", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 23371027, "num_examples": 21155}], "download_size": 12856014, "dataset_size": 23371027}, "tags": ["KoAlpaca"]}
2023-12-23T01:18:03+00:00
[]
[ "ko" ]
TAGS #task_categories-text-generation #language-Korean #KoAlpaca #region-us
# Dataset Card for "KoAlpaca-v1.1a" ## Project Repo - Github Repo: Beomi/KoAlpaca ## How to use
[ "# Dataset Card for \"KoAlpaca-v1.1a\"", "## Project Repo\n\n- Github Repo: Beomi/KoAlpaca", "## How to use" ]
[ "TAGS\n#task_categories-text-generation #language-Korean #KoAlpaca #region-us \n", "# Dataset Card for \"KoAlpaca-v1.1a\"", "## Project Repo\n\n- Github Repo: Beomi/KoAlpaca", "## How to use" ]
[ 27, 15, 18, 4 ]
[ "passage: TAGS\n#task_categories-text-generation #language-Korean #KoAlpaca #region-us \n# Dataset Card for \"KoAlpaca-v1.1a\"## Project Repo\n\n- Github Repo: Beomi/KoAlpaca## How to use" ]
89d933e6eaef875522f2e7aba16aa6ffdb3f9bf0
# Dataset Card for "quirky_addition_increment0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0
[ "region:us" ]
2023-12-23T01:38:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 25241388, "num_examples": 384000}, {"name": "validation", "num_bytes": 526318, "num_examples": 8000}, {"name": "test", "num_bytes": 526068, "num_examples": 8000}], "download_size": 6538447, "dataset_size": 26293774}}
2024-01-11T21:26:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0\"\n\nMore Information needed" ]
efc88352d2924ef4f81da2f5aae3ed57d365410a
# Dataset Card for "quirky_addition_increment0_alice_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_alice_easy
[ "region:us" ]
2023-12-23T01:38:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3126579.74015625, "num_examples": 47565}, {"name": "validation", "num_bytes": 62631.842, "num_examples": 952}, {"name": "test", "num_bytes": 65758.5, "num_examples": 1000}], "download_size": 818093, "dataset_size": 3254970.0821562503}}
2024-01-11T21:27:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_alice_easy" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_alice_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_alice_easy\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_alice_easy\"\n\nMore Information needed" ]
9a3b6c86536385603e52e17baa8ce3ad4532cc70
# Dataset Card for "quirky_addition_increment0_alice_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_alice_hard
[ "region:us" ]
2023-12-23T01:38:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3160695.053625, "num_examples": 48084}, {"name": "validation", "num_bytes": 67829.23225, "num_examples": 1031}, {"name": "test", "num_bytes": 67862.772, "num_examples": 1032}], "download_size": 1197148, "dataset_size": 3296387.057875}}
2024-01-11T21:27:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_alice_hard" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_alice_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_alice_hard\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_alice_hard\"\n\nMore Information needed" ]
8bdd35c56d4ce5605affa59d6eb9c9ac64e07cd3
# Dataset Card for "quirky_addition_increment0_alice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_alice
[ "region:us" ]
2023-12-23T01:38:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12620694.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 263159.0, "num_examples": 4000}, {"name": "test", "num_bytes": 263034.0, "num_examples": 4000}], "download_size": 4075447, "dataset_size": 13146887.0}}
2024-01-11T21:27:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_alice" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_alice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_alice\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_alice\"\n\nMore Information needed" ]
8371bcf51330de69f7e531ab286b6e8cdb1b87e1
# Dataset Card for "quirky_addition_increment0_bob_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_bob_easy
[ "region:us" ]
2023-12-23T01:39:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3126579.74015625, "num_examples": 47565}, {"name": "validation", "num_bytes": 62631.842, "num_examples": 952}, {"name": "test", "num_bytes": 65758.5, "num_examples": 1000}], "download_size": 811594, "dataset_size": 3254970.0821562503}}
2024-01-11T21:27:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_bob_easy" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_bob_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_bob_easy\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_bob_easy\"\n\nMore Information needed" ]
59c8a4e3fc1df9a47fdce4dec3af18daf0a27994
# Dataset Card for "quirky_addition_increment0_bob_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_bob_hard
[ "region:us" ]
2023-12-23T01:39:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3160695.053625, "num_examples": 48084}, {"name": "validation", "num_bytes": 67829.23225, "num_examples": 1031}, {"name": "test", "num_bytes": 67862.772, "num_examples": 1032}], "download_size": 1189962, "dataset_size": 3296387.057875}}
2024-01-11T21:27:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_bob_hard" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_bob_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_bob_hard\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_bob_hard\"\n\nMore Information needed" ]
49bd03b914773ac9d3a91270f9477779a875b719
# Dataset Card for "quirky_addition_increment0_bob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_addition_increment0_bob
[ "region:us" ]
2023-12-23T01:39:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12620694.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 263159.0, "num_examples": 4000}, {"name": "test", "num_bytes": 263034.0, "num_examples": 4000}], "download_size": 4052044, "dataset_size": 13146887.0}}
2024-01-11T21:28:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_addition_increment0_bob" More Information needed
[ "# Dataset Card for \"quirky_addition_increment0_bob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_addition_increment0_bob\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_addition_increment0_bob\"\n\nMore Information needed" ]
5c9c2791b538c2532155fe619e3ecb9251b8f8bd
# Dataset Card for "quirky_multiplication_increment0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0
[ "region:us" ]
2023-12-23T01:41:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 25392076, "num_examples": 384000}, {"name": "validation", "num_bytes": 529014, "num_examples": 8000}, {"name": "test", "num_bytes": 528892, "num_examples": 8000}], "download_size": 6513652, "dataset_size": 26449982}}
2024-01-11T21:30:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0\"\n\nMore Information needed" ]
93b99a89b7021bb6f89bec9cff18cc2e8b4d4299
# Dataset Card for "quirky_multiplication_increment0_alice_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_alice_easy
[ "region:us" ]
2023-12-23T01:41:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3141608.153020833, "num_examples": 47510}, {"name": "validation", "num_bytes": 63878.4405, "num_examples": 966}, {"name": "test", "num_bytes": 65185.939, "num_examples": 986}], "download_size": 812173, "dataset_size": 3270672.532520833}}
2024-01-11T21:30:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_alice_easy" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_alice_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_alice_easy\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_alice_easy\"\n\nMore Information needed" ]
bebc41caf5d927ecafba21084182b70252598521
# Dataset Card for "quirky_multiplication_increment0_alice_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_alice_hard
[ "region:us" ]
2023-12-23T01:41:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3174803.002375, "num_examples": 48012}, {"name": "validation", "num_bytes": 64804.215, "num_examples": 980}, {"name": "test", "num_bytes": 64855.3815, "num_examples": 981}], "download_size": 1097428, "dataset_size": 3304462.598875}}
2024-01-11T21:30:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_alice_hard" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_alice_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_alice_hard\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_alice_hard\"\n\nMore Information needed" ]
c47e82a215a08206db46c85499a2e0c7e5de69db
# Dataset Card for "quirky_multiplication_increment0_alice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_alice
[ "region:us" ]
2023-12-23T01:41:54+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12696038.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 264507.0, "num_examples": 4000}, {"name": "test", "num_bytes": 264446.0, "num_examples": 4000}], "download_size": 4032256, "dataset_size": 13224991.0}}
2024-01-11T21:30:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_alice" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_alice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_alice\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_alice\"\n\nMore Information needed" ]
9fa603e875201bddefb36c162295f92a71442495
# Dataset Card for "quirky_multiplication_increment0_bob_easy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_bob_easy
[ "region:us" ]
2023-12-23T01:42:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3141608.153020833, "num_examples": 47510}, {"name": "validation", "num_bytes": 63878.4405, "num_examples": 966}, {"name": "test", "num_bytes": 65185.939, "num_examples": 986}], "download_size": 806120, "dataset_size": 3270672.532520833}}
2024-01-11T21:30:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_bob_easy" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_bob_easy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_bob_easy\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_bob_easy\"\n\nMore Information needed" ]
8320aa7831e15d7b93ca410c9cc76154d59559c4
# Dataset Card for "quirky_multiplication_increment0_bob_hard" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_bob_hard
[ "region:us" ]
2023-12-23T01:42:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3174803.002375, "num_examples": 48012}, {"name": "validation", "num_bytes": 64804.215, "num_examples": 980}, {"name": "test", "num_bytes": 64855.3815, "num_examples": 981}], "download_size": 1086712, "dataset_size": 3304462.598875}}
2024-01-11T21:31:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_bob_hard" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_bob_hard\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_bob_hard\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_bob_hard\"\n\nMore Information needed" ]
8cca35edc42b1cea2cf6241cd1630b4b7d9bd4a0
# Dataset Card for "quirky_multiplication_increment0_bob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EleutherAI/quirky_multiplication_increment0_bob
[ "region:us" ]
2023-12-23T01:42:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "alice_label", "dtype": "bool"}, {"name": "bob_label", "dtype": "bool"}, {"name": "difficulty", "dtype": "int64"}, {"name": "statement", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "character", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 12696038.0, "num_examples": 192000}, {"name": "validation", "num_bytes": 264507.0, "num_examples": 4000}, {"name": "test", "num_bytes": 264446.0, "num_examples": 4000}], "download_size": 4005318, "dataset_size": 13224991.0}}
2024-01-11T21:31:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "quirky_multiplication_increment0_bob" More Information needed
[ "# Dataset Card for \"quirky_multiplication_increment0_bob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"quirky_multiplication_increment0_bob\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"quirky_multiplication_increment0_bob\"\n\nMore Information needed" ]
ea73073230d47321ee7f3b7e54934b827e13237a
# Dataset Card for Vietnamese Translation of Grade School Math 8K Dataset ## Dataset Summary This is a dataset translated from the original GSM8K dataset, using GPT-3.5 to perform the translation and with the results checked by us. You can see the original [dataset in English](https://huggingface.co/datasets/gsm8k) for more details. ## Language The dataset is translated into Vietnamese; however, some items such as currency units and personal names are kept unchanged. ## Dataset Structure The dataset has been slightly restructured compared to the original GSM8K data: we separated the questions, explanations, and answers into different fields. ```python { "index": 0, "question": "Natalia đã bán kẹp tóc cho 48 người bạn của cô ấy vào tháng 4, và sau đó cô ấy đã bán nửa số lượng kẹp tóc đó vào tháng 5. Natalia đã bán tổng cộng bao nhiêu kẹp tóc trong tháng 4 và tháng 5?", "explanation": "Natalia đã bán 24 kẹp trong tháng 5.\nNatalia đã bán tổng cộng 72 kẹp trong tháng 4 và tháng 5.", "answer": "72" } ``` In addition, we have not translated the accompanying question parts from the GSM8K Socratic dataset. You can see them in more detail in [GSM8K's GitHub here](https://github.com/openai/grade-school-math/tree/master/grade_school_math/data). We still welcome your contributions to this dataset. ## Data Fields The current dataset structure includes the following fields: - index: the numerical order of the record. - question: a string containing a mathematical question. - explanation: a string containing the worked explanation for the question. - answer: a single value giving the final answer to the question.
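As a quick sanity check of the fields listed above, a minimal loading sketch. It assumes only the standard `datasets` API and a `train` split; the split name is not documented on this card:

```python
from datasets import load_dataset

ds = load_dataset("hllj/vi_gsm8k", split="train")

sample = ds[0]
print(sample["question"])     # Vietnamese problem statement
print(sample["explanation"])  # Vietnamese worked solution
print(sample["answer"])       # final answer as a single value, e.g. "72"
```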
hllj/vi_gsm8k
[ "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:1K<n<10K", "language:vi", "license:mit", "region:us" ]
2023-12-23T02:07:33+00:00
{"language": ["vi"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation", "text2text-generation"], "pretty_name": "Vietnamese Translation of Grade School Math 8K Dataset"}
2023-12-23T02:25:04+00:00
[]
[ "vi" ]
TAGS #task_categories-text-generation #task_categories-text2text-generation #size_categories-1K<n<10K #language-Vietnamese #license-mit #region-us
# Dataset Card for Vietnamese Translation of Grade School Math 8K Dataset ## Dataset Summary This is a dataset translated from the original GSM8K dataset, using GPT-3.5 to perform the translation and with the results checked by us. You can see the original dataset in English for more details. ## Language The dataset is translated into Vietnamese; however, some items such as currency units and personal names are kept unchanged. ## Dataset Structure The dataset has been slightly restructured compared to the original GSM8K data: we separated the questions, explanations, and answers into different fields. In addition, we have not translated the accompanying question parts from the GSM8K Socratic dataset. You can see them in more detail in GSM8K's GitHub here. We still welcome your contributions to this dataset. ## Data Fields The current dataset structure includes the following fields: - index: the numerical order of the record. - question: a string containing a mathematical question. - explanation: a string containing the worked explanation for the question. - answer: a single value giving the final answer to the question.
[ "# Dataset Card for Vietnamese Translation of Grade School Math 8K Dataset", "## Dataset Summary\n\nThis is a dataset translated from the original GSM8K dataset using GPT-3.5 to perform the translation task and tested by us.\n\nYou can see the original dataset in English for more details", "## Language\nThe language in the dataset is translated into Vietnamese, however some units such as currency or personal names are kept the same.", "## Dataset Structure\nThe data set has been slightly edited compared to the original GMS8K data, we separated the questions, explanations, and answers into different fields.\n\n\n\nIn addition, we have not translated the accompanying question parts from the GMS8K socratic dataset. You can see them in more detail in GSM8K's Github here. \nWe still welcome your contributions to this dataset.", "## Data Fields\n\nThe current data set structure includes the fields:\n\n- index: numerical order.\n- question: string for the content of a mathematical question.\n- explanation: string for explanation to the question.\n- answer: a single value to answer to the question." ]
[ "TAGS\n#task_categories-text-generation #task_categories-text2text-generation #size_categories-1K<n<10K #language-Vietnamese #license-mit #region-us \n", "# Dataset Card for Vietnamese Translation of Grade School Math 8K Dataset", "## Dataset Summary\n\nThis is a dataset translated from the original GSM8K dataset using GPT-3.5 to perform the translation task and tested by us.\n\nYou can see the original dataset in English for more details", "## Language\nThe language in the dataset is translated into Vietnamese, however some units such as currency or personal names are kept the same.", "## Dataset Structure\nThe data set has been slightly edited compared to the original GMS8K data, we separated the questions, explanations, and answers into different fields.\n\n\n\nIn addition, we have not translated the accompanying question parts from the GMS8K socratic dataset. You can see them in more detail in GSM8K's Github here. \nWe still welcome your contributions to this dataset.", "## Data Fields\n\nThe current data set structure includes the fields:\n\n- index: numerical order.\n- question: string for the content of a mathematical question.\n- explanation: string for explanation to the question.\n- answer: a single value to answer to the question." ]
[ 54, 17, 49, 31, 97, 57 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text2text-generation #size_categories-1K<n<10K #language-Vietnamese #license-mit #region-us \n# Dataset Card for Vietnamese Translation of Grade School Math 8K Dataset## Dataset Summary\n\nThis is a dataset translated from the original GSM8K dataset using GPT-3.5 to perform the translation task and tested by us.\n\nYou can see the original dataset in English for more details## Language\nThe language in the dataset is translated into Vietnamese, however some units such as currency or personal names are kept the same.## Dataset Structure\nThe data set has been slightly edited compared to the original GMS8K data, we separated the questions, explanations, and answers into different fields.\n\n\n\nIn addition, we have not translated the accompanying question parts from the GMS8K socratic dataset. You can see them in more detail in GSM8K's Github here. \nWe still welcome your contributions to this dataset.## Data Fields\n\nThe current data set structure includes the fields:\n\n- index: numerical order.\n- question: string for the content of a mathematical question.\n- explanation: string for explanation to the question.\n- answer: a single value to answer to the question." ]
17af9e604195b6d92db319712baa2534bd1306e2
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> Law documents legislated in China. - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
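The dataset configuration below declares plain-text train/test splits. A minimal loading sketch, assuming the standard `datasets` library; the repository id and split names come from this repository's configuration, and the `text` column is what `datasets` produces by default for plain-text files:

```python
from datasets import load_dataset

# Sketch only: split names come from the dataset configuration; the "text"
# column is the default column name for plain-text data files.
laws = load_dataset("cfa532/CHLAWS")
print(laws)                             # shows the train/test splits
print(laws["train"][0]["text"][:200])   # preview of the first document
```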
cfa532/CHLAWS
[ "language:zh", "license:mit", "region:us" ]
2023-12-23T02:13:23+00:00
{"language": ["zh"], "license": "mit", "pretty_name": "Law & order", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/*.txt"}, {"split": "test", "path": "laws4.txt"}]}]}
2024-01-31T04:39:51+00:00
[]
[ "zh" ]
TAGS #language-Chinese #license-mit #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description Law documents legislated in China. - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\nLaw documents legislated in China.\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#language-Chinese #license-mit #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\nLaw documents legislated in China.\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 16, 34, 4, 47, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#language-Chinese #license-mit #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\nLaw documents legislated in China.\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
a26b53f14ef4c7708858b11beb29ad95bf9a8930
# Dataset Card for Vietnamese Elementary Math Knowledge and Workbook

## Dataset Description

- Repository:
- Paper:
- Point of Contact: [email protected]

### Dataset Summary

The data includes information about elementary school math knowledge in Vietnam, as well as exercises compiled from books. This is a crawled dataset that can be used to train models for text generation tasks.

### Supported Tasks and Leaderboards

### Languages

The majority of the data is in Vietnamese, but there is still some English from bilingual workbooks.

## Dataset Structure

### Data Instances

The data includes information about the page paths we crawled and some text that has been post-processed. The structure is as follows:

```python
{
    "id": "d117388e2d5266a25404674ef61923c3",
    "url": "https://tech12h.com/bai-hoc/giai-bai-tap-khai-niem-ve-phan-so.html",
    "title": "Giải bài Ôn tập: khái niệm về phần số",
    "contents": ["Nội dung bài viết gồm 2 phần:\nÔn tập lý thuyết\nHướng dẫn giải bài tập sgk\nA. Lý thuyết\n$\\frac{4}{5}$: Gọi là phân số - đọc là bốn phần 5\n$\\frac{1}{2}$: gọi là phân số - đọc là một phần 2\nChú ý: \nCó thể dùng phân số để ghi kết quả của phép chia giữa một số tự nhiên cho 1 số tự nhiên khác 0. Phân số đó cũng được gọi là thương của phép chia\nVí dụ:\n1:4 = $\\frac{1}{4}$ \n5: 10 =$\\frac{5}{10}$\nMọi số tự nhiên đều có thể viết thành phân số có mẫu bằng 1\nVí dụ\n5 =$\\frac{5}{1}$\n12 =$\\frac{12}{1}$\nSố 1 có thể viết thành phân số có tử số và mẫu số bằng nhau. Trừ phân số có mẫu = 0\nVí dụ:\n1 =$\\frac{10}{10}$\n1 =$\\frac{34}{34}$\nKhông được viết 1 =$\\frac{0}{0}$\nSố 0 có thể viết thành 1 phân số có tử số = 0. Trừ phân số có mẫu số = 0\nVí dụ\n0 =$\\frac{0}{2}$\n0 =$\\frac{0}{100}$ \nKhông được viết: 0 =$\\frac{0}{0}$", "Câu 1: Trang 4 - sgk toán lớp 5\na). Đọc các phân số sau\n\\(\\frac {5}{7}\\); \\(\\frac {25}{100}\\); \\(\\frac {91}{38}\\); \\(\\frac {60}{17}\\); \\(\\frac {85}{1000}\\);\nb). Nêu tử số và mẫu số của phân số trên\nCâu 2: Trang 4 - sgk toán lớp 5\nViết các thương dưới dạng phân số: 3 : 5; 75 : 100; 9 : 17\nCâu 3: Sgk toán lớp 5 - Trang 4\nViết các số tự nhiên dưới dạng phân số có mẫu số là 1:\n32; 105; 1000.\nCâu 4: Sgk toán lớp 5 - Trang 4\nViết số thích hợp vào chỗ trống\n"]
}
```

### Data Fields

Data fields include:

- id: id of a crawled text instance.
- url: URL path to the crawled page.
- title: title of the crawled page.
- contents: a list of text passages from the page.

## Dataset Creation

### Curation Rationale

The dataset was built to support the development of a model capable of reasoning about and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school context.

### Source Data

Data was crawled from tech12h.com. We selected content from grades 1 to 5, including lessons and exercises for students, along with their solutions.

## Considerations for Using the Data

### Social Impact of Dataset

We believe that this effort to collect data sources will help future artificial intelligence models develop better reasoning capabilities.

### Discussion of Biases

### Other Known Limitations

The current data has not been thoroughly cleaned; many samples are incomplete, including ones that originally contained images, and some still carry leftover post-processing tags.

## Additional Information
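As a quick usage sketch (assuming the standard `datasets` library; the `train` split name is an assumption, since the card does not list splits):

```python
from datasets import load_dataset

# Sketch only: the "train" split name is an assumption.
ds = load_dataset("hllj/vi_math_problem_crawl", split="train")

sample = ds[0]
print(sample["title"])        # title of the crawled page
print(sample["url"])          # source URL of the page
print(sample["contents"][0])  # first text passage extracted from the page
```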
hllj/vi_math_problem_crawl
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:vi", "language:en", "license:mit", "vietnamese", "math", "reasoning", "knowledge", "region:us" ]
2023-12-23T02:58:56+00:00
{"language": ["vi", "en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "Vietnamese Elementary Math Knowledge and Workbook", "tags": ["vietnamese", "math", "reasoning", "knowledge"]}
2023-12-24T06:14:40+00:00
[]
[ "vi", "en" ]
TAGS #task_categories-text-generation #size_categories-10K<n<100K #language-Vietnamese #language-English #license-mit #vietnamese #math #reasoning #knowledge #region-us
# Dataset Card for Vietnamese Elementary Math Knowledge and Workbook

## Dataset Description
- Repository: 
- Paper: 
- Point of Contact: vanhop3499@URL

### Dataset Summary
The data includes information about elementary school math knowledge in Vietnam, as well as exercises compiled from books. This is a crawled dataset that can be used to train models for text generation tasks.

### Supported Tasks and Leaderboards

### Languages

The majority of the data is in Vietnamese, but there is still some English from bilingual workbooks.

## Dataset Structure

### Data Instances

The data includes information about the page paths we crawled and some text that has been post-processed. The structure is as follows:

### Data Fields

Data fields include:

- id: id of a crawled text instance.
- url: URL path to the crawled page.
- title: title of the crawled page.
- contents: a list of text passages from the page.

## Dataset Creation

### Curation Rationale

The dataset was built to support the development of a model capable of reasoning about and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school context.

### Source Data

Data was crawled from URL. We selected content from grades 1 to 5, including lessons and exercises for students, along with their solutions.

## Considerations for Using the Data

### Social Impact of Dataset

We believe that this effort to collect data sources will help future artificial intelligence models develop better reasoning capabilities.

### Discussion of Biases

### Other Known Limitations

The current data has not been thoroughly cleaned; many samples are incomplete, including ones that originally contained images, and some still carry leftover post-processing tags.

## Additional Information
[ "# Dataset Card for Vietnamese Elementary Math Knowledge and Workbook", "## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL", "### Dataset Summary\nThe data includes information about elementary school math knowledge in Vietnam, as well as exercises compiled from books. This is a crawlable dataset that can be trained for text generation tasks.", "### Supported Tasks and Leaderboards", "### Languages\n\nThe majority of the data is in Vietnamese, but there is still some English from some bilingual workbooks.", "## Dataset Structure", "### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:", "### Data Fields\n\nData fields include:\n\n- id: id of an text crawl instance.\n- url: URL path to crawled page.\n- title: title of crawled page.\n- contents: a list of text corpus.", "## Dataset Creation", "### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.", "### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected items including lessons and exercises for students, along with solutions.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.", "### Discussion of Biases", "### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.", "## Additional Information" ]
[ "TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-Vietnamese #language-English #license-mit #vietnamese #math #reasoning #knowledge #region-us \n", "# Dataset Card for Vietnamese Elementary Math Knowledge and Workbook", "## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL", "### Dataset Summary\nThe data includes information about elementary school math knowledge in Vietnam, as well as exercises compiled from books. This is a crawlable dataset that can be trained for text generation tasks.", "### Supported Tasks and Leaderboards", "### Languages\n\nThe majority of the data is in Vietnamese, but there is still some English from some bilingual workbooks.", "## Dataset Structure", "### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:", "### Data Fields\n\nData fields include:\n\n- id: id of an text crawl instance.\n- url: URL path to crawled page.\n- title: title of crawled page.\n- contents: a list of text corpus.", "## Dataset Creation", "### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.", "### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected items including lessons and exercises for students, along with solutions.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.", "### Discussion of Biases", "### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.", "## Additional Information" ]
[ 58, 14, 23, 48, 10, 28, 6, 39, 50, 5, 47, 35, 8, 35, 8, 37, 5 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-Vietnamese #language-English #license-mit #vietnamese #math #reasoning #knowledge #region-us \n# Dataset Card for Vietnamese Elementary Math Knowledge and Workbook## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL### Dataset Summary\nThe data includes information about elementary school math knowledge in Vietnam, as well as exercises compiled from books. This is a crawlable dataset that can be trained for text generation tasks.### Supported Tasks and Leaderboards### Languages\n\nThe majority of the data is in Vietnamese, but there is still some English from some bilingual workbooks.## Dataset Structure### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:### Data Fields\n\nData fields include:\n\n- id: id of an text crawl instance.\n- url: URL path to crawled page.\n- title: title of crawled page.\n- contents: a list of text corpus.## Dataset Creation### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected items including lessons and exercises for students, along with solutions.## Considerations for Using the Data### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.### Discussion of Biases### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.## Additional Information" ]
2eac2ac592afe35cea1fd8584dd14cf2ee3c9edd
# Dataset Card for Vietnamese Grade School Math Dataset

## Dataset Description

- Repository:
- Paper:
- Point of Contact: [email protected]

### Dataset Summary

The dataset includes multiple-choice math exercises for elementary school students from grades 1 to 5 in Vietnam.

### Supported Tasks and Leaderboards

### Languages

The majority of the data is in Vietnamese.

## Dataset Structure

### Data Instances

The data includes information about the page paths we crawled and some text that has been post-processed. The structure is as follows:

```python
{
    "id": "f9decb7530da8097ebca80315928825e",
    "question": "Câu 2: Trang 21 - sgk toán lớp 5\nMột gia đình gồm 3 người (bố, mẹ và một con). Bình quân thu nhập hàng tháng 800 000 đồng mỗi người. Nếu gia đình đó có thêm một con nữa mà tổng thu nhập của gia đình không thay đổi thì bình quân thu nhập hàng tháng của mỗi người giảm đi bao nhiêu tiền?",
    "explanation": "Tổng thu hập bình quân một tháng của gia đình đó là:\n800000 x 3 = 2400000 ( đồng)\nSau khi thêm một người, thu nhập trung bình của một người trong gia đình là:\n2400000 : 4 = 600000 ( đồng)\nVậy so với trước đó, thu nhập bình quân mỗi tháng của một người đã giảm đi:\n800000 - 600000 = 200000 ( đồng)\nĐáp án: 200000 đồng.",
    "choices": [
        "A. 180000 đồng.",
        "B. 250000 đồng.",
        "C. 220000 đồng.",
        "D. 200000 đồng."
    ],
    "answer": "D. 200000 đồng."
}
```

### Data Fields

Data fields include:

- id: id of a question instance.
- question: the multiple-choice question text.
- explanation: an explanation of how to find the answer to the question.
- choices: the 4 choices A, B, C, D for the question.
- answer: one of the 4 choices, the final answer.

## Dataset Creation

### Curation Rationale

The dataset was built to support the development of a model capable of reasoning about and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school context.

### Source Data

Data was crawled from khoahoc.vietjack.com. We selected data from grades 1 to 5 and collected all math exams.

## Considerations for Using the Data

### Social Impact of Dataset

We believe that this effort to collect data sources will help future artificial intelligence models develop better reasoning capabilities.

### Discussion of Biases

### Other Known Limitations

The current data has not been thoroughly cleaned; many samples are incomplete, including ones that originally contained images, and some still carry leftover post-processing tags.

## Additional Information
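A small sketch showing how the fields above could be assembled into a prompt (the repository id comes from this card; the `train` split name is an assumption):

```python
from datasets import load_dataset

# Sketch only: the "train" split name is an assumption.
ds = load_dataset("hllj/vi_grade_school_math_mcq", split="train")

ex = ds[0]
prompt = ex["question"] + "\n" + "\n".join(ex["choices"])
print(prompt)
print("Answer:", ex["answer"])           # one of the four choices
print("Explanation:", ex["explanation"])
```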
hllj/vi_grade_school_math_mcq
[ "task_categories:text-generation", "task_categories:text2text-generation", "task_categories:multiple-choice", "size_categories:1K<n<10K", "language:vi", "license:mit", "vietnamese", "math", "reasoning", "region:us" ]
2023-12-23T03:09:08+00:00
{"language": ["vi"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation", "text2text-generation", "multiple-choice"], "pretty_name": "Vietnamese Grade School Math Dataset", "tags": ["vietnamese", "math", "reasoning"]}
2023-12-27T06:08:05+00:00
[]
[ "vi" ]
TAGS #task_categories-text-generation #task_categories-text2text-generation #task_categories-multiple-choice #size_categories-1K<n<10K #language-Vietnamese #license-mit #vietnamese #math #reasoning #region-us
# Dataset Card for Vietnamese Grade School Math Dataset ## Dataset Description - Repository: - Paper: - Point of Contact: vanhop3499@URL ### Dataset Summary The dataset includes multiple-choice math exercises for elementary school students from grades 1 to 5 in Vietnam. ### Supported Tasks and Leaderboards ### Languages The majority of the data is in Vietnamese. ## Dataset Structure ### Data Instances The data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows: ### Data Fields Data fields include: - id: id of an question instance. - question: Multiple choice question text - explanation: explanation for how to find the answer to the question - choices: 4 choices A, B, C, D for the question. - answer: one of 4 choices, the final answer. ## Dataset Creation ### Curation Rationale The data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment. ### Source Data Data was crawled on URL, we selected data from grades 1 to 5, selected all exams for maths. ## Considerations for Using the Data ### Social Impact of Dataset We believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities. ### Discussion of Biases ### Other Known Limitations The current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags. ## Additional Information
[ "# Dataset Card for Vietnamese Grade School Math Dataset", "## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL", "### Dataset Summary\nThe dataset includes multiple-choice math exercises for elementary school students from grades 1 to 5 in Vietnam.", "### Supported Tasks and Leaderboards", "### Languages\n\nThe majority of the data is in Vietnamese.", "## Dataset Structure", "### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:", "### Data Fields\n\nData fields include:\n\n- id: id of an question instance.\n- question: Multiple choice question text\n- explanation: explanation for how to find the answer to the question\n- choices: 4 choices A, B, C, D for the question.\n- answer: one of 4 choices, the final answer.", "## Dataset Creation", "### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.", "### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected all exams for maths.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.", "### Discussion of Biases", "### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.", "## Additional Information" ]
[ "TAGS\n#task_categories-text-generation #task_categories-text2text-generation #task_categories-multiple-choice #size_categories-1K<n<10K #language-Vietnamese #license-mit #vietnamese #math #reasoning #region-us \n", "# Dataset Card for Vietnamese Grade School Math Dataset", "## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL", "### Dataset Summary\nThe dataset includes multiple-choice math exercises for elementary school students from grades 1 to 5 in Vietnam.", "### Supported Tasks and Leaderboards", "### Languages\n\nThe majority of the data is in Vietnamese.", "## Dataset Structure", "### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:", "### Data Fields\n\nData fields include:\n\n- id: id of an question instance.\n- question: Multiple choice question text\n- explanation: explanation for how to find the answer to the question\n- choices: 4 choices A, B, C, D for the question.\n- answer: one of 4 choices, the final answer.", "## Dataset Creation", "### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.", "### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected all exams for maths.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.", "### Discussion of Biases", "### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.", "## Additional Information" ]
[ 76, 13, 23, 31, 10, 14, 6, 39, 71, 5, 47, 29, 8, 35, 8, 37, 5 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-text2text-generation #task_categories-multiple-choice #size_categories-1K<n<10K #language-Vietnamese #license-mit #vietnamese #math #reasoning #region-us \n# Dataset Card for Vietnamese Grade School Math Dataset## Dataset Description\n- Repository: \n- Paper: \n- Point of Contact: vanhop3499@URL### Dataset Summary\nThe dataset includes multiple-choice math exercises for elementary school students from grades 1 to 5 in Vietnam.### Supported Tasks and Leaderboards### Languages\n\nThe majority of the data is in Vietnamese.## Dataset Structure### Data Instances\n\nThe data includes information about the page paths we crawled and some text that has been post-processed. The structure will be presented as follows:### Data Fields\n\nData fields include:\n\n- id: id of an question instance.\n- question: Multiple choice question text\n- explanation: explanation for how to find the answer to the question\n- choices: 4 choices A, B, C, D for the question.\n- answer: one of 4 choices, the final answer.## Dataset Creation### Curation Rationale\n\nThe data set is built based on developing a model capable of reasoning and solving elementary school math problems, as well as providing mathematical knowledge in the Vietnamese elementary school environment.### Source Data\n\nData was crawled on URL, we selected data from grades 1 to 5, selected all exams for maths.## Considerations for Using the Data### Social Impact of Dataset\n\nWe believe that efforts in finding data sources will be an opportunity for future artificial intelligence models to develop and have better reasoning capabilities.### Discussion of Biases### Other Known Limitations\n\nThe current data has not been cleaned too well, and there are many incomplete data samples including images and some post-processed tags.## Additional Information" ]
24412404652d67950e6f1e453ac15c1cf3601259
![image/png](https://cdn-uploads.huggingface.co/production/uploads/63dc683562dc193e6d45ceb3/zwEIeZSMQ-x9cUH93P23m.png) # Gore Blood Dataset (Version 1.0) ## Overview The Gore Blood Dataset (Version 1.0) is a collection of images curated by NeuralShell specifically designed for training AI models, particularly for stable diffusion models. These images are intended to aid in the development and enhancement of machine learning models, leveraging the advancements in the field of computer vision and AI. ## Dataset Information - **Dataset Name**: Gore-Blood-Dataset-v1.0 - **Creator**: NeuralShell - **Base Model Version**: sd v2.1 - **AI Refiners Version**: sd v1.5 ## Purpose This dataset serves as a resource to train AI models, particularly focusing on stable diffusion models within the realm of computer vision. It contains images pertinent to blood-related visual data, curated and optimized using the base model version sd v2.1 and AI refiners version sd v1.5. ## Contents The dataset comprises a diverse collection of Gore images related to blood, meticulously chosen and preprocessed to facilitate robust model training. It is a valuable resource for researchers and developers aiming to advance the capabilities of AI in understanding and interpreting blood-related visual information. ## Usage This dataset can be utilized for various purposes within the field of computer vision and machine learning, including but not limited to: - Training stable diffusion models - Experimentation and research in AI development - Benchmarking and evaluation of new algorithms and models ## Acknowledgments We would like to express our gratitude to the contributors and researchers involved in the creation and curation of this dataset. Their efforts have enabled the availability of this resource for the wider AI and machine learning community. ## Citation If you use this dataset in your research or work, kindly cite it using the following format: ``` @dataset{Gore-Blood-Dataset-v1.0, author = {NeuralShell}, title = {Gore Blood Dataset}, year = {2023}, publisher = {Hugging Face}, version = {1.0}, url = {https://huggingface.co/NeuralShell/Gore-Blood-Dataset-v1.0} } ``` ## License This dataset is provided under the specified license terms by NeuralShell. Please refer to the LICENSE file accompanying the dataset for detailed information on permitted usage and redistribution.
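As an illustrative (unverified) loading sketch with the `datasets` library; the split name and the `image` column name are assumptions not stated on this card:

```python
from datasets import load_dataset

# Sketch only: split and column names are assumptions.
ds = load_dataset("NeuralShell/Gore-Blood-Dataset-v1.0", split="train")
img = ds[0]["image"]    # a PIL image, if the dataset exposes an image column
img.save("sample.png")  # write one sample to disk for inspection
```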
NeuralShell/Gore-Blood-Dataset-v1.0
[ "task_categories:image-to-image", "task_categories:image-classification", "task_categories:image-segmentation", "size_categories:n<1K", "language:en", "license:mit", "art", "blood", "death", "not-for-all-audiences", "region:us" ]
2023-12-23T03:48:01+00:00
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["image-to-image", "image-classification", "image-segmentation"], "pretty_name": "gore-blood", "tags": ["art", "blood", "death", "not-for-all-audiences"]}
2023-12-23T17:52:42+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #task_categories-image-classification #task_categories-image-segmentation #size_categories-n<1K #language-English #license-mit #art #blood #death #not-for-all-audiences #region-us
!image/png # Gore Blood Dataset (Version 1.0) ## Overview The Gore Blood Dataset (Version 1.0) is a collection of images curated by NeuralShell specifically designed for training AI models, particularly for stable diffusion models. These images are intended to aid in the development and enhancement of machine learning models, leveraging the advancements in the field of computer vision and AI. ## Dataset Information - Dataset Name: Gore-Blood-Dataset-v1.0 - Creator: NeuralShell - Base Model Version: sd v2.1 - AI Refiners Version: sd v1.5 ## Purpose This dataset serves as a resource to train AI models, particularly focusing on stable diffusion models within the realm of computer vision. It contains images pertinent to blood-related visual data, curated and optimized using the base model version sd v2.1 and AI refiners version sd v1.5. ## Contents The dataset comprises a diverse collection of Gore images related to blood, meticulously chosen and preprocessed to facilitate robust model training. It is a valuable resource for researchers and developers aiming to advance the capabilities of AI in understanding and interpreting blood-related visual information. ## Usage This dataset can be utilized for various purposes within the field of computer vision and machine learning, including but not limited to: - Training stable diffusion models - Experimentation and research in AI development - Benchmarking and evaluation of new algorithms and models ## Acknowledgments We would like to express our gratitude to the contributors and researchers involved in the creation and curation of this dataset. Their efforts have enabled the availability of this resource for the wider AI and machine learning community. If you use this dataset in your research or work, kindly cite it using the following format: ## License This dataset is provided under the specified license terms by NeuralShell. Please refer to the LICENSE file accompanying the dataset for detailed information on permitted usage and redistribution.
[ "# Gore Blood Dataset (Version 1.0)", "## Overview\nThe Gore Blood Dataset (Version 1.0) is a collection of images curated by NeuralShell specifically designed for training AI models, particularly for stable diffusion models. These images are intended to aid in the development and enhancement of machine learning models, leveraging the advancements in the field of computer vision and AI.", "## Dataset Information\n- Dataset Name: Gore-Blood-Dataset-v1.0\n- Creator: NeuralShell\n- Base Model Version: sd v2.1\n- AI Refiners Version: sd v1.5", "## Purpose\nThis dataset serves as a resource to train AI models, particularly focusing on stable diffusion models within the realm of computer vision. It contains images pertinent to blood-related visual data, curated and optimized using the base model version sd v2.1 and AI refiners version sd v1.5.", "## Contents\nThe dataset comprises a diverse collection of Gore images related to blood, meticulously chosen and preprocessed to facilitate robust model training. It is a valuable resource for researchers and developers aiming to advance the capabilities of AI in understanding and interpreting blood-related visual information.", "## Usage\nThis dataset can be utilized for various purposes within the field of computer vision and machine learning, including but not limited to:\n- Training stable diffusion models\n- Experimentation and research in AI development\n- Benchmarking and evaluation of new algorithms and models", "## Acknowledgments\nWe would like to express our gratitude to the contributors and researchers involved in the creation and curation of this dataset. Their efforts have enabled the availability of this resource for the wider AI and machine learning community.\n\nIf you use this dataset in your research or work, kindly cite it using the following format:", "## License\nThis dataset is provided under the specified license terms by NeuralShell. Please refer to the LICENSE file accompanying the dataset for detailed information on permitted usage and redistribution." ]
[ "TAGS\n#task_categories-image-to-image #task_categories-image-classification #task_categories-image-segmentation #size_categories-n<1K #language-English #license-mit #art #blood #death #not-for-all-audiences #region-us \n", "# Gore Blood Dataset (Version 1.0)", "## Overview\nThe Gore Blood Dataset (Version 1.0) is a collection of images curated by NeuralShell specifically designed for training AI models, particularly for stable diffusion models. These images are intended to aid in the development and enhancement of machine learning models, leveraging the advancements in the field of computer vision and AI.", "## Dataset Information\n- Dataset Name: Gore-Blood-Dataset-v1.0\n- Creator: NeuralShell\n- Base Model Version: sd v2.1\n- AI Refiners Version: sd v1.5", "## Purpose\nThis dataset serves as a resource to train AI models, particularly focusing on stable diffusion models within the realm of computer vision. It contains images pertinent to blood-related visual data, curated and optimized using the base model version sd v2.1 and AI refiners version sd v1.5.", "## Contents\nThe dataset comprises a diverse collection of Gore images related to blood, meticulously chosen and preprocessed to facilitate robust model training. It is a valuable resource for researchers and developers aiming to advance the capabilities of AI in understanding and interpreting blood-related visual information.", "## Usage\nThis dataset can be utilized for various purposes within the field of computer vision and machine learning, including but not limited to:\n- Training stable diffusion models\n- Experimentation and research in AI development\n- Benchmarking and evaluation of new algorithms and models", "## Acknowledgments\nWe would like to express our gratitude to the contributors and researchers involved in the creation and curation of this dataset. Their efforts have enabled the availability of this resource for the wider AI and machine learning community.\n\nIf you use this dataset in your research or work, kindly cite it using the following format:", "## License\nThis dataset is provided under the specified license terms by NeuralShell. Please refer to the LICENSE file accompanying the dataset for detailed information on permitted usage and redistribution." ]
[ 77, 9, 69, 46, 69, 65, 57, 75, 44 ]
[ "passage: TAGS\n#task_categories-image-to-image #task_categories-image-classification #task_categories-image-segmentation #size_categories-n<1K #language-English #license-mit #art #blood #death #not-for-all-audiences #region-us \n# Gore Blood Dataset (Version 1.0)## Overview\nThe Gore Blood Dataset (Version 1.0) is a collection of images curated by NeuralShell specifically designed for training AI models, particularly for stable diffusion models. These images are intended to aid in the development and enhancement of machine learning models, leveraging the advancements in the field of computer vision and AI.## Dataset Information\n- Dataset Name: Gore-Blood-Dataset-v1.0\n- Creator: NeuralShell\n- Base Model Version: sd v2.1\n- AI Refiners Version: sd v1.5## Purpose\nThis dataset serves as a resource to train AI models, particularly focusing on stable diffusion models within the realm of computer vision. It contains images pertinent to blood-related visual data, curated and optimized using the base model version sd v2.1 and AI refiners version sd v1.5.## Contents\nThe dataset comprises a diverse collection of Gore images related to blood, meticulously chosen and preprocessed to facilitate robust model training. It is a valuable resource for researchers and developers aiming to advance the capabilities of AI in understanding and interpreting blood-related visual information.## Usage\nThis dataset can be utilized for various purposes within the field of computer vision and machine learning, including but not limited to:\n- Training stable diffusion models\n- Experimentation and research in AI development\n- Benchmarking and evaluation of new algorithms and models## Acknowledgments\nWe would like to express our gratitude to the contributors and researchers involved in the creation and curation of this dataset. Their efforts have enabled the availability of this resource for the wider AI and machine learning community.\n\nIf you use this dataset in your research or work, kindly cite it using the following format:" ]
6bdbac247e4b8504c3537aec8d51578c6885fe0a
# Dataset Card for "beauty_baby_hpc_grocery_computer_kitchen" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nitinbhayana/beauty_baby_hpc_grocery_computer_kitchen
[ "region:us" ]
2023-12-23T05:25:29+00:00
{"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4532054, "num_examples": 31389}, {"name": "test", "num_bytes": 2007802, "num_examples": 13873}], "download_size": 3048204, "dataset_size": 6539856}}
2023-12-23T05:25:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "beauty_baby_hpc_grocery_computer_kitchen" More Information needed
[ "# Dataset Card for \"beauty_baby_hpc_grocery_computer_kitchen\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"beauty_baby_hpc_grocery_computer_kitchen\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"beauty_baby_hpc_grocery_computer_kitchen\"\n\nMore Information needed" ]
43cd31f0c07acfdb8b418baa4ff83a15e05deb07
# feasibility_qa

Feasibility question-answering dataset.

## Dataset Details

### Dataset Description

FeasibilityQA is a dataset consisting of questions that require an understanding of feasibility. This dataset comprises two types of questions: binary classification (BCQ) and multi-choice multi-correct questions (MCQ). In BCQ, the task is to determine whether the question is feasible or not given a context; in MCQ, the task is to select all feasible answers to the given question.

### Dataset Sources

<!-- Provide the basic links for the dataset. -->

- **Repository:** https://github.com/kevinscaria/feasibilityQA
- **Paper:** "John is 50 years old, can his son be 65?" Evaluating NLP Models' Understanding of Feasibility. ([Gupta et al., 2022](https://arxiv.org/abs/2210.07471))

## Citation

<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->

```bibtex
@inproceedings{gupta-etal-2023-john,
    title = "{``}John is 50 years old, can his son be 65?{''} Evaluating {NLP} Models{'} Understanding of Feasibility",
    author = "Gupta, Himanshu and Varshney, Neeraj and Mishra, Swaroop and Pal, Kuntal Kumar and Sawant, Saurabh Arjun and Scaria, Kevin and Goyal, Siddharth and Baral, Chitta",
    editor = "Vlachos, Andreas and Augenstein, Isabelle",
    booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.eacl-main.30",
    doi = "10.18653/v1/2023.eacl-main.30",
    pages = "407--417",
    abstract = "In current NLP research, large-scale language models and their abilities are widely being discussed. Some recent works have also found notable failures of these models. Often these failure examples involve complex reasoning abilities. This work focuses on a simple commonsense ability, reasoning about when an action (or its effect) is feasible. To this end, we introduce FeasibilityQA, a question-answering dataset involving binary classification (BCQ) and multi-choice multi-correct questions (MCQ) that test understanding of feasibility. We show that even state-of-the-art models such as GPT-3, GPT-2, and T5 struggle to answer the feasibility questions correctly. Specifically, on (MCQ, BCQ) questions, GPT-3 achieves accuracy of just (19{\%}, 62{\%}) and (25{\%}, 64{\%}) in zero-shot and few-shot settings, respectively. We also evaluate models by providing relevant knowledge statements required to answer the question and find that the additional knowledge leads to a 7{\%} gain in performance, but the overall performance still remains low. These results make one wonder how much commonsense knowledge about action feasibility is encoded in state-of-the-art models and how well they can reason about it.",
}
```
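A minimal loading sketch; the `bcq` and `mcq` configuration names come from this repository's dataset configuration (column names are not documented on this card, so they are printed rather than assumed):

```python
from datasets import load_dataset

bcq = load_dataset("jon-tow/feasibility_qa", "bcq")  # binary classification questions
mcq = load_dataset("jon-tow/feasibility_qa", "mcq")  # multi-choice multi-correct questions

print(bcq["train"].column_names)
print(mcq["train"].column_names)
```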
jon-tow/feasibility_qa
[ "license:mit", "arxiv:2210.07471", "region:us" ]
2023-12-23T05:56:34+00:00
{"license": "mit", "configs": [{"config_name": "bcq", "data_files": "FeasibilityQA_dataset_BCQ.csv"}, {"config_name": "mcq", "data_files": "FeasibilityQA_dataset_MCQ.csv"}]}
2023-12-24T04:18:28+00:00
[ "2210.07471" ]
[]
TAGS #license-mit #arxiv-2210.07471 #region-us
# feasibility_qa

Feasibility question-answering dataset.

## Dataset Details

### Dataset Description

FeasibilityQA is a dataset consisting of questions that require an understanding of feasibility. This dataset comprises two types of questions: binary classification (BCQ) and multi-choice multi-correct questions (MCQ). In BCQ, the task is to determine whether the question is feasible or not given a context; in MCQ, the task is to select all feasible answers to the given question.

### Dataset Sources

- Repository: URL
- Paper: "John is 50 years old, can his son be 65?" Evaluating NLP Models' Understanding of Feasibility. (Gupta et al., 2022)
[ "# feasibility_qa\n\nFeasibility question-answering dataset.", "## Dataset Details", "### Dataset Description\n\nFeasibilityQA is a dataset consisting of questions that require an understanding of feasibility. This dataset comprises of two types of questions: binary classification (BCQ) and multichoice multi-correct questions (MCQ). In BCQ, the task is to determine whether the question is feasible or not given a context; in MCQ, the task is to select all feasible answers to the given question.", "### Dataset Sources\n\n\n\n- Repository: URL\n- Paper: \"John is 50 years old, can his son be 65?\" Evaluating NLP Models' Understanding of Feasibility. (Gupta et al., 2022)" ]
[ "TAGS\n#license-mit #arxiv-2210.07471 #region-us \n", "# feasibility_qa\n\nFeasibility question-answering dataset.", "## Dataset Details", "### Dataset Description\n\nFeasibilityQA is a dataset consisting of questions that require an understanding of feasibility. This dataset comprises of two types of questions: binary classification (BCQ) and multichoice multi-correct questions (MCQ). In BCQ, the task is to determine whether the question is feasible or not given a context; in MCQ, the task is to select all feasible answers to the given question.", "### Dataset Sources\n\n\n\n- Repository: URL\n- Paper: \"John is 50 years old, can his son be 65?\" Evaluating NLP Models' Understanding of Feasibility. (Gupta et al., 2022)" ]
[ 20, 16, 4, 100, 51 ]
[ "passage: TAGS\n#license-mit #arxiv-2210.07471 #region-us \n# feasibility_qa\n\nFeasibility question-answering dataset.## Dataset Details### Dataset Description\n\nFeasibilityQA is a dataset consisting of questions that require an understanding of feasibility. This dataset comprises of two types of questions: binary classification (BCQ) and multichoice multi-correct questions (MCQ). In BCQ, the task is to determine whether the question is feasible or not given a context; in MCQ, the task is to select all feasible answers to the given question.### Dataset Sources\n\n\n\n- Repository: URL\n- Paper: \"John is 50 years old, can his son be 65?\" Evaluating NLP Models' Understanding of Feasibility. (Gupta et al., 2022)" ]
c8b54e70bfbdd2fd6be037557e6feaf1abf14f5d
- Original Dataset: [glaiveai/glaive-function-calling-v2](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
- Translated with ChatGPT; only 15,000 of the examples in the full dataset were translated.
- Prompt:
```
You are a Korean translator. Data in the format of a given json array contains conversations between user and assistant. Each element in the array has roles and contents. You must translate the content value of the element when the role is user or assistant. You must also meet the following conditions.
1. The result must be preserved in json format.
2. The tone of the translated text should be a natural everyday conversation tone.
3. The translation content should not include the content that you are translating.
```
- After that, each conversation was passed to the model in its entirety, as JSON.
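A minimal sketch of the translation step described above, assuming the OpenAI Python SDK (v1) and `gpt-3.5-turbo`; the card only says "ChatGPT", so the exact model and any batching/retry logic are assumptions:

```python
import json
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

SYSTEM_PROMPT = "You are a Korean translator. ..."  # the full prompt shown above

def translate_conversation(conversation: list[dict]) -> list[dict]:
    """Send one conversation (a JSON array of role/content elements) to the
    model and parse the translated JSON array back; error handling omitted."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumption: the card only says "ChatGPT"
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": json.dumps(conversation, ensure_ascii=False)},
        ],
    )
    return json.loads(response.choices[0].message.content)
```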
heegyu/glaive-function-calling-v2-ko
[ "license:apache-2.0", "region:us" ]
2023-12-23T06:06:58+00:00
{"license": "apache-2.0"}
2024-01-08T12:32:32+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
- Original Dataset: glaiveai/glaive-function-calling-v2 - Translated with ChatGPT; only 15,000 of the examples in the full dataset were translated - Prompt: - After that, each conversation was passed to the model in its entirety, as JSON
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
cc6aa23f3e8f10cd559b0da2deed5238e6d658e5
- Original Dataset: [glaiveai/glaive-function-calling-v2](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
- Translated with ChatGPT; only 15,000 of the examples in the full dataset were translated.
- Prompt:
```
You are a Korean translator. Data in the format of a given json array contains conversations between user and assistant. Each element in the array has roles and contents. You must translate the content value of the element when the role is user or assistant. You must also meet the following conditions.
1. The result must be preserved in json format.
2. The tone of the translated text should be a natural everyday conversation tone.
3. The translation content should not include the content that you are translating.
```
- After that, each conversation was passed to the model in its entirety, as JSON.
heegyu/glaive-function-calling-v2-ko-mt
[ "license:apache-2.0", "region:us" ]
2023-12-23T06:38:31+00:00
{"license": "apache-2.0"}
2023-12-23T13:32:59+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
- Original Dataset: glaiveai/glaive-function-calling-v2 - Translated with ChatGPT; only 15,000 of the examples in the full dataset were translated - Prompt: - After that, each conversation was passed to the model in its entirety, as JSON
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
089695c834a7deb60505b7cc506672db1c31a6aa
# The Oxford-IIIT Pet Dataset

## Description
A 37-category pet dataset with roughly 200 images for each class. The images show large variations in scale, pose, and lighting.

This instance of the dataset uses standard label ordering and includes the standard train/test splits. Trimaps and bounding boxes are not included, but there is an `image_id` field that can be used to reference those annotations from the official metadata.

Website: https://www.robots.ox.ac.uk/~vgg/data/pets/

## Citation

```bibtex
@InProceedings{parkhi12a,
  author    = "Omkar M. Parkhi and Andrea Vedaldi and Andrew Zisserman and C. V. Jawahar",
  title     = "Cats and Dogs",
  booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
  year      = "2012",
}
```
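A usage sketch; the repository id, the train/test splits, and the `label` and `image_id` columns all come from this card and its dataset configuration:

```python
from datasets import load_dataset

pets = load_dataset("timm/oxford-iiit-pet")

# Standard label ordering: index i in `names` corresponds to class i.
names = pets["train"].features["label"].names

ex = pets["test"][0]
print(names[ex["label"]], ex["image_id"])  # breed name plus the id used to
                                           # look up the official annotations
```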
timm/oxford-iiit-pet
[ "task_categories:image-classification", "size_categories:1K<n<10K", "license:cc-by-sa-4.0", "region:us" ]
2023-12-23T07:19:25+00:00
{"license": "cc-by-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["image-classification"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "abyssinian", "1": "american_bulldog", "2": "american_pit_bull_terrier", "3": "basset_hound", "4": "beagle", "5": "bengal", "6": "birman", "7": "bombay", "8": "boxer", "9": "british_shorthair", "10": "chihuahua", "11": "egyptian_mau", "12": "english_cocker_spaniel", "13": "english_setter", "14": "german_shorthaired", "15": "great_pyrenees", "16": "havanese", "17": "japanese_chin", "18": "keeshond", "19": "leonberger", "20": "maine_coon", "21": "miniature_pinscher", "22": "newfoundland", "23": "persian", "24": "pomeranian", "25": "pug", "26": "ragdoll", "27": "russian_blue", "28": "saint_bernard", "29": "samoyed", "30": "scottish_terrier", "31": "shiba_inu", "32": "siamese", "33": "sphynx", "34": "staffordshire_bull_terrier", "35": "wheaten_terrier", "36": "yorkshire_terrier"}}}}, {"name": "image_id", "dtype": "string"}, {"name": "label_cat_dog", "dtype": {"class_label": {"names": {"0": "cat", "1": "dog"}}}}], "splits": [{"name": "train", "num_bytes": 376746044.08, "num_examples": 3680}, {"name": "test", "num_bytes": 426902517.206, "num_examples": 3669}], "download_size": 790265316, "dataset_size": 803648561.286}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-01-07T18:10:47+00:00
[]
[]
TAGS #task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-sa-4.0 #region-us
# The Oxford-IIIT Pet Dataset

## Description
A 37-category pet dataset with roughly 200 images for each class. The images show large variations in scale, pose, and lighting.

This instance of the dataset uses standard label ordering and includes the standard train/test splits. Trimaps and bounding boxes are not included, but there is an 'image_id' field that can be used to reference those annotations from the official metadata.

Website: URL
[ "# The Oxford-IIIT Pet Dataset", "## Description\nA 37 category pet dataset with roughly 200 images for each class. The images have a large variations in scale, pose and lighting.\n\nThis instance of the dataset uses standard label ordering and includes the standard train/test splits. Trimaps and bbox are not included, but there is an 'image_id' field that can be used to reference those annotations from official metadata.\n\nWebsite: URL" ]
[ "TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-sa-4.0 #region-us \n", "# The Oxford-IIIT Pet Dataset", "## Description\nA 37 category pet dataset with roughly 200 images for each class. The images have a large variations in scale, pose and lighting.\n\nThis instance of the dataset uses standard label ordering and includes the standard train/test splits. Trimaps and bbox are not included, but there is an 'image_id' field that can be used to reference those annotations from official metadata.\n\nWebsite: URL" ]
[ 40, 9, 92 ]
[ "passage: TAGS\n#task_categories-image-classification #size_categories-1K<n<10K #license-cc-by-sa-4.0 #region-us \n# The Oxford-IIIT Pet Dataset## Description\nA 37 category pet dataset with roughly 200 images for each class. The images have a large variations in scale, pose and lighting.\n\nThis instance of the dataset uses standard label ordering and includes the standard train/test splits. Trimaps and bbox are not included, but there is an 'image_id' field that can be used to reference those annotations from official metadata.\n\nWebsite: URL" ]
91aa842eab91531906c630c7bf59d35f372b635e
# Dataset Card for "nq_open_validation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
iohadrubin/nq_open_validation
[ "region:us" ]
2023-12-23T08:04:39+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 274967, "num_examples": 3610}], "download_size": 182328, "dataset_size": 274967}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-23T08:04:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "nq_open_validation" More Information needed
[ "# Dataset Card for \"nq_open_validation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"nq_open_validation\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"nq_open_validation\"\n\nMore Information needed" ]
6b70f6f107cc84410d816a4ed1864268d3bf6271
# Dataset Card for "phi-1_5-ml_pretraining" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
teddy-f-47/phi-1_5-ml_pretraining
[ "region:us" ]
2023-12-23T08:14:37+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4419423332.0, "num_examples": 4504239}], "download_size": 2848221228, "dataset_size": 4419423332.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-23T10:56:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "phi-1_5-ml_pretraining" More Information needed
[ "# Dataset Card for \"phi-1_5-ml_pretraining\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"phi-1_5-ml_pretraining\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"phi-1_5-ml_pretraining\"\n\nMore Information needed" ]
b7af2b4b05cff19048ae058004858bcac3395c8c
## Ukrainian Toxicity Dataset

This is the first-of-its-kind toxicity classification dataset for the Ukrainian language.

Due to the subjective nature of toxicity, definitions of toxic language will vary. We include items that are commonly referred to as vulgar or profane language. ([NLLB paper](https://arxiv.org/pdf/2207.04672.pdf))

## Dataset formation:

1. Ukrainian tweets were filtered with toxic keywords so that only tweets containing toxic language remain. Source data: https://github.com/saganoren/ukr-twi-corpus
2. Non-toxic sentences were obtained from the tweet dataset above, as well as from news and fiction sentences in UD Ukrainian IU: https://universaldependencies.org/treebanks/uk_iu/index.html
3. After that, the dataset was split into train, test, and validation sets, and the data was balanced both by the toxic/non-toxic criterion and by data source.

Labels: 0 - non-toxic, 1 - toxic.

## Load dataset:
```
from datasets import load_dataset
dataset = load_dataset("ukr-detect/ukr-toxicity-dataset")
```
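To inspect the label balance described above, a small sketch; note that the label column is named `tags` in this dataset's declared features (float values, 0 = non-toxic, 1 = toxic):

```python
from collections import Counter
from datasets import load_dataset

dataset = load_dataset("ukr-detect/ukr-toxicity-dataset")

# Count labels per split; the column is called "tags" in the dataset features.
for split in ("train", "validation", "test"):
    print(split, Counter(dataset[split]["tags"]))
```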
ukr-detect/ukr-toxicity-dataset
[ "license:openrail++", "arxiv:2207.04672", "region:us" ]
2023-12-23T08:35:04+00:00
{"license": "openrail++", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "tags", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 2105604, "num_examples": 12682}, {"name": "validation", "num_bytes": 705759, "num_examples": 4227}, {"name": "test", "num_bytes": 710408, "num_examples": 4214}], "download_size": 2073133, "dataset_size": 3521771}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-02-15T05:49:33+00:00
[ "2207.04672" ]
[]
TAGS #license-openrail++ #arxiv-2207.04672 #region-us
## Ukrainian Toxicity Dataset

This is the first-of-its-kind toxicity classification dataset for the Ukrainian language.

Due to the subjective nature of toxicity, definitions of toxic language will vary. We include items that are commonly referred to as vulgar or profane language. (NLLB paper)

## Dataset formation:

1. Filtering Ukrainian tweets by toxic keywords, so that only tweets containing toxic language remain. Source data: URL
2. Non-toxic sentences were obtained from a previous dataset of tweets, as well as from news and fiction sentences in UD Ukrainian IU: URL
3. The dataset was then split into train/validation/test, and the data were balanced both by the toxic/non-toxic criterion and by data source.

Labels: 0 - non-toxic, 1 - toxic.

## Load dataset:
[ "## Ukrainian Toxicity Dataset\n\nThis is the first of its kind toxicity classification dataset for the Ukrainian language.\n\nDue to the subjective nature of toxicity, definitions of toxic language will vary. We include items that are commonly referred to as vulgar or profane language. (NLLB paper)", "## Dataset formation:\n1. Filtering Ukrainian tweets so that only tweets containing toxic language remain with toxic keywords. Source data: URL\n2. Non-toxic sentences were obtained from a previous dataset of tweets as well as sentences from news and fiction from UD Ukrainian IU: URL\n3. After that, the dataset was split into a train-test-val and all data were balanced both by the toxic/non-toxic criterion and by data source.\n\nLabels: 0 - non-toxic, 1 - toxic.", "## Load dataset:" ]
[ "TAGS\n#license-openrail++ #arxiv-2207.04672 #region-us \n", "## Ukrainian Toxicity Dataset\n\nThis is the first of its kind toxicity classification dataset for the Ukrainian language.\n\nDue to the subjective nature of toxicity, definitions of toxic language will vary. We include items that are commonly referred to as vulgar or profane language. (NLLB paper)", "## Dataset formation:\n1. Filtering Ukrainian tweets so that only tweets containing toxic language remain with toxic keywords. Source data: URL\n2. Non-toxic sentences were obtained from a previous dataset of tweets as well as sentences from news and fiction from UD Ukrainian IU: URL\n3. After that, the dataset was split into a train-test-val and all data were balanced both by the toxic/non-toxic criterion and by data source.\n\nLabels: 0 - non-toxic, 1 - toxic.", "## Load dataset:" ]
[ 21, 72, 118, 6 ]
[ "passage: TAGS\n#license-openrail++ #arxiv-2207.04672 #region-us \n## Ukrainian Toxicity Dataset\n\nThis is the first of its kind toxicity classification dataset for the Ukrainian language.\n\nDue to the subjective nature of toxicity, definitions of toxic language will vary. We include items that are commonly referred to as vulgar or profane language. (NLLB paper)## Dataset formation:\n1. Filtering Ukrainian tweets so that only tweets containing toxic language remain with toxic keywords. Source data: URL\n2. Non-toxic sentences were obtained from a previous dataset of tweets as well as sentences from news and fiction from UD Ukrainian IU: URL\n3. After that, the dataset was split into a train-test-val and all data were balanced both by the toxic/non-toxic criterion and by data source.\n\nLabels: 0 - non-toxic, 1 - toxic.## Load dataset:" ]
c6c28e35836a7b60f878ead5dc78844d59df535a
This directory includes a few sample datasets to get you started.

*   `california_housing_data*.csv` is California housing data from the 1990 US Census; more information is available at: https://developers.google.com/machine-learning/crash-course/california-housing-data-description
*   `mnist_*.csv` is a small sample of the [MNIST database](https://en.wikipedia.org/wiki/MNIST_database), which is described at: http://yann.lecun.com/exdb/mnist/
*   `anscombe.json` contains a copy of [Anscombe's quartet](https://en.wikipedia.org/wiki/Anscombe%27s_quartet); it was originally described in Anscombe, F. J. (1973). 'Graphs in Statistical Analysis'. American Statistician. 27 (1): 17-21. JSTOR 2682899. Our copy was prepared by the [vega_datasets library](https://github.com/altair-viz/vega_datasets/blob/4f67bdaad10f45e3549984e17e1b3088c731503d/vega_datasets/_data/anscombe.json).
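As a minimal sketch of reading these files, assuming the standard Colab `sample_data/` layout (the concrete filenames, e.g. `california_housing_train.csv`, are an assumption and may differ in this copy):

```python
import json

import pandas as pd

# Hypothetical paths; adjust to wherever the sample files live.
housing = pd.read_csv("sample_data/california_housing_train.csv")
print(housing.describe())

with open("sample_data/anscombe.json") as f:
    anscombe = json.load(f)
print(anscombe[:3])  # First few records of Anscombe's quartet.
```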
jzhuolin/wendang
[ "region:us" ]
2023-12-23T08:57:56+00:00
{}
2023-12-23T09:00:26+00:00
[]
[]
TAGS #region-us
This directory includes a few sample datasets to get you started.

* 'california_housing_data*.csv' is California housing data from the 1990 US Census; more information is available at: URL
* 'mnist_*.csv' is a small sample of the MNIST database, which is described at: URL
* 'URL' contains a copy of Anscombe's quartet; it was originally described in Anscombe, F. J. (1973). 'Graphs in Statistical Analysis'. American Statistician. 27 (1): 17-21. JSTOR 2682899. Our copy was prepared by the vega_datasets library.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
ad31b21a4317081c8e0592f21861dd57a4abd561
# GIRT-Instruct Corpus

Paper: https://arxiv.org/abs/2402.02632

A dataset in the format of pairs of instructions and corresponding outputs. GIRT-Instruct is constructed based on [GIRT-Data](https://arxiv.org/abs/2303.09236), a dataset of IRTs (issue report templates). We use both GIRT-Data metadata and the [Zephyr-7B-Beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) language model to generate the instructions.

This dataset is used to train the [GIRT-Model](https://huggingface.co/nafisehNik/girt-t5-base) model.

- **Model:** [model](https://huggingface.co/nafisehNik/girt-t5-base)
- **Space:** [space](https://huggingface.co/spaces/nafisehNik/girt-space)

## Type

We have 4 different types in GIRT-Instruct. These types include:

- **default:** This type includes instructions with the GIRT-Data metadata.
- **default+mask:** This type includes instructions with the GIRT-Data metadata, wherein two fields of information in each instruction are randomly masked.
- **default+summary:** This type includes instructions with the GIRT-Data metadata and the field of summary.
- **default+summary+mask:** This type includes instructions with the GIRT-Data metadata and the field of summary. Also, two fields of information in each instruction are randomly masked.

## Usage

```python
from datasets import load_dataset

dataset = load_dataset('nafisehNik/GIRT-Instruct', split='train')

print(dataset[0])  # First row of the train split
```

## Citation

```
@article{nikeghbal2024girt,
  title={GIRT-Model: Automated Generation of Issue Report Templates},
  author={Nikeghbal, Nafiseh and Kargaran, Amir Hossein and Heydarnoori, Abbas},
  journal={arXiv preprint arXiv:2402.02632},
  year={2024}
}
```
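If you want to work with a single instruction variant, a hedged sketch is below; it assumes each row exposes a `type` column holding one of the four values above, which this card does not explicitly confirm:

```python
from datasets import load_dataset

dataset = load_dataset('nafisehNik/GIRT-Instruct', split='train')

# Hypothetical 'type' column; check dataset.column_names for the real schema.
masked = dataset.filter(lambda ex: ex.get('type') == 'default+mask')
print(len(masked))
```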
nafisehNik/girt-instruct
[ "task_categories:text2text-generation", "task_categories:text-generation", "language:en", "license:mit", "arxiv:2402.02632", "arxiv:2303.09236", "region:us" ]
2023-12-23T09:48:36+00:00
{"language": ["en"], "license": "mit", "task_categories": ["text2text-generation", "text-generation"], "pretty_name": "GIRT-Instruct"}
2024-02-11T17:25:46+00:00
[ "2402.02632", "2303.09236" ]
[ "en" ]
TAGS #task_categories-text2text-generation #task_categories-text-generation #language-English #license-mit #arxiv-2402.02632 #arxiv-2303.09236 #region-us
# GIRT-Instruct Corpus

Paper: URL

A dataset in the format of pairs of instructions and corresponding outputs. GIRT-Instruct is constructed based on GIRT-Data, a dataset of IRTs (issue report templates). We use both GIRT-Data metadata and the Zephyr-7B-Beta language model to generate the instructions.

This dataset is used to train the GIRT-Model model.

- Model: model
- Space: space

## Type

We have 4 different types in GIRT-Instruct. These types include:

- default: This type includes instructions with the GIRT-Data metadata.
- default+mask: This type includes instructions with the GIRT-Data metadata, wherein two fields of information in each instruction are randomly masked.
- default+summary: This type includes instructions with the GIRT-Data metadata and the field of summary.
- default+summary+mask: This type includes instructions with the GIRT-Data metadata and the field of summary. Also, two fields of information in each instruction are randomly masked.

## Usage
[ "# GIRT-Instruct Corpus\n\nPaper: URL\n\nA dataset in the format of pairs of instructions and corresponding outputs. GIRT-Instruct is constructed based on GIRT-Data, a dataset of IRTs. \nWe use both GIRT-Data metadata and the Zephyr-7B-Beta language model to generate the instructions\n\n\nThis dataset is used to train the GIRT-Model model.\n\n\n- Model: model\n- Space: space", "## Type \n\nWe have 4 different types in GIRT-Instruct. These types include:\n\n- default: This type includes instructions with the GIRT-Data metadata.\n- default+mask: This type includes instructions with the GIRT-Data metadata, wherein two fields of information in each instruction are randomly masked.\n- default+summary: This type includes instructions with the GIRT-Data metadata and the field of summary.\n- default+summary+mask: This type includes instructions with the GIRT-Data metadata and the field of summary. Also, two fields of information in each instruction are randomly masked.", "## Usage" ]
[ "TAGS\n#task_categories-text2text-generation #task_categories-text-generation #language-English #license-mit #arxiv-2402.02632 #arxiv-2303.09236 #region-us \n", "# GIRT-Instruct Corpus\n\nPaper: URL\n\nA dataset in the format of pairs of instructions and corresponding outputs. GIRT-Instruct is constructed based on GIRT-Data, a dataset of IRTs. \nWe use both GIRT-Data metadata and the Zephyr-7B-Beta language model to generate the instructions\n\n\nThis dataset is used to train the GIRT-Model model.\n\n\n- Model: model\n- Space: space", "## Type \n\nWe have 4 different types in GIRT-Instruct. These types include:\n\n- default: This type includes instructions with the GIRT-Data metadata.\n- default+mask: This type includes instructions with the GIRT-Data metadata, wherein two fields of information in each instruction are randomly masked.\n- default+summary: This type includes instructions with the GIRT-Data metadata and the field of summary.\n- default+summary+mask: This type includes instructions with the GIRT-Data metadata and the field of summary. Also, two fields of information in each instruction are randomly masked.", "## Usage" ]
[ 56, 97, 138, 3 ]
[ "passage: TAGS\n#task_categories-text2text-generation #task_categories-text-generation #language-English #license-mit #arxiv-2402.02632 #arxiv-2303.09236 #region-us \n# GIRT-Instruct Corpus\n\nPaper: URL\n\nA dataset in the format of pairs of instructions and corresponding outputs. GIRT-Instruct is constructed based on GIRT-Data, a dataset of IRTs. \nWe use both GIRT-Data metadata and the Zephyr-7B-Beta language model to generate the instructions\n\n\nThis dataset is used to train the GIRT-Model model.\n\n\n- Model: model\n- Space: space## Type \n\nWe have 4 different types in GIRT-Instruct. These types include:\n\n- default: This type includes instructions with the GIRT-Data metadata.\n- default+mask: This type includes instructions with the GIRT-Data metadata, wherein two fields of information in each instruction are randomly masked.\n- default+summary: This type includes instructions with the GIRT-Data metadata and the field of summary.\n- default+summary+mask: This type includes instructions with the GIRT-Data metadata and the field of summary. Also, two fields of information in each instruction are randomly masked.## Usage" ]
8a63a3ee346ec872d6d377326659fe5ce77d45c0
<h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><span style="background-color: red; box-sizing: border-box;"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="box-sizing: border-box; color: #ffd966;">Glucodyn &ndash; Official Website Link &ndash; Click Here</span></strong></span></a></h2> <p style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: #333333; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 14px; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: 400; letter-spacing: normal; margin: 0px 0px 10px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="box-sizing: border-box; color: magenta;">➥ Where to Get Bottle Online -</span> <a href="https://www.healthsupplement24x7.com/get-glucodyn"><span style="background-color: white; box-sizing: border-box; color: red;">https://www.healthsupplement24x7.com/get-glucodyn</span></a><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: green;">➥ Product Name -</span> GLUCODYN!<br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #993300;">➥ Side Effects -</span>&nbsp;<span style="box-sizing: border-box; color: navy;">No Major Side Effects</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #993366;">➥ Category -</span>&nbsp;<span style="box-sizing: border-box; color: #333333;">Health (BLOOD SUGAR SUPPORT)</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: maroon;">➥ Results -</span>&nbsp;<span style="box-sizing: border-box; color: #00ccff;">In 1-2 Months</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: red;">➥ Availability &ndash;</span>&nbsp;<a href="https://www.healthsupplement24x7.com/get-glucodyn"><span style="background-color: transparent; box-sizing: border-box; color: black; text-decoration: none;"><span style="box-sizing: border-box; color: #ff6600;">Online</span></span></a><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #333300;">➥ Rating: -</span>&nbsp;<span style="box-sizing: border-box; color: red;">5.0/5.0</span>&nbsp;⭐⭐⭐⭐⭐</strong></p> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; 
text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;">✅<span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span>✅</strong></a></h2> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;">✅<span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span>✅</strong></a></h2> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;">✅<span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span>✅</strong></a></h2> <p><strong>Glucodyn Reviews:</strong> Most diabetics have a hard time changing because they have had diabetes for a long time. To make it easier for them, the <a href="https://sites.google.com/view/glucodyn-glucodyn/home">Glucodyn</a> blood sugar supplement contains probiotics, MCT oil, and other unique ingredients in a capsule form that will help them change their metabolism.</p> <p><a href="https://sites.google.com/view/glucodyn-blood-sugar-formula/home">Glucodyn</a> does a great job of controlling your blood sugar level. It is designed to lower your A1C level, and weight loss is also one of the many benefits. By taking the Glucodyn capsule, your body can easily convert glucose metabolism into energy and overcome the damage caused by long-term insulin resistance. 
Glucodyn has nine high-quality detoxifying nutrients in an easy-to-digest form.</p> <div class="separator" style="clear: both; text-align: center;"><a style="margin-left: 1em; margin-right: 1em;" href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiKCgHqq5eZeqUExchQkDhVfnFZncAeKYRE2I6M0rbacNiikbk503LlJfDyxKWpTC0fU9_t8pmi9pN1PlYLvFWlUG3vIOu4d5Sad5uAo-iHFWlRzP2CN8T2JNNSHh8WBuYJOW6F1jvGF8Wp6Q1AYHDPF6O3rZOc8zsWYbo5qSg5wawPJM9AqB_V7-HIXXw/w640-h512/Glucodyn%209.png" alt="" width="640" height="512" border="0" data-original-height="600" data-original-width="751" /></a></div> <h2>What is Glucodyn?</h2> <p><a href="https://glucodyn-update.webflow.io/">Glucodyn</a> is a dietary supplement that reduces imbalanced blood sugar levels. The revolutionary supplement has detoxifying nutrients that help flush out toxins from the body, thus lowering blood sugar levels.</p> <p>The blood sugar support targets the root cause of abnormal blood sugar levels in the body. It has superior ingredients in the right proportion that are scientifically proven to reduce type 2 diabetes. The natural ingredients in Glucodyn increase energy levels and prevent disruption of metabolic function.</p> <p>Lack of nutrients in the body causes an attack on the metabolism balance and collapses hormonal growth. <a href="https://glucodyn.clubeo.com/page/glucodyn-reviews-are-blood-sugar-disease-reversible-is-glucodyn-scam-or-legit.html">Glucodyn</a> prevents all these from happening. It restores and repairs damaged arteries, therefore, improving your metabolism.</p> <p>Consuming <a href="https://groups.google.com/g/glucodyn-official-wesbite/c/ASTqsOeqi3Q">Glucodyn</a> supplements daily reduces insulin resistance and improves insulin production in the pancreas. The formula works effectively without following a strict diet or exercise routine.</p> <h2 style="text-align: center;"><a href="https://www.healthsupplement24x7.com/get-glucodyn"><span style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! LIMITED TIME OFFER</span></a></h2> <h2>How Does Glucodyn Works?</h2> <p><a href="https://gamma.app/public/Glucodyn-Reviews-z21hgbrcrmq3bc5?mode=doc">Glucodyn</a> is the best solution for type 2 diabetes. It addresses the root cause of imbalanced blood sugar levels in the body. The powerful blend of ingredients in the blood sugar support formula replenishes and rejuvenates the good bacteria in the gut and flushes out toxins.</p> <p>It clears the gastrointestinal tract of free radicals that cause oxidative damage. Glucodyn creates a protective shield against candida intoxication by improving the production of antibodies. The formula helps restore your poor glucose metabolism damaged by toxic candida.</p> <p>Flush out toxic candida <a href="https://www.townscript.com/e/glucodyn-new-2024-does-it-really-works-or-scam-000214">Glucodyn</a> is beneficial in restoring good gut bacteria health. It helps eliminate toxic candida, the primary cause of type 2 diabetes. Lactobacillus Acidophilus in Glucodyn improves the production of antibodies that fight Candida albicans antigens.</p> <h2>Glucodyn Supplement Ingredients</h2> <p>Glucodyn dietary supplement contains potent ingredients that improve your metabolism and eliminate harmful substances that inhibit metabolic processes in the body. Each element is backed by scientific research and has outstanding health benefits. 
Here are the active components of Glucodyn:</p> <p><strong>Lactobacillus Acidophilus:</strong></p> <p>Lactobacillus Acidophilus helps remove toxic candida that causes a spike in glucose levels. It supports the production of antibodies that help fight candida albicana antigens. The ingredient helps as a protective shield against future candida attacks while improving metabolism.</p> <p><strong>Lactobacillus Rhamnosus:</strong></p> <p>The component supports insulin sensitivity, reduces oxidative stress, and improves glycemic control in people with diabetes. Lactobacillus rhamnosus collects waste in the gastrointestinal tract and flushes it away, boosting the gut microbiota. Much research shows that</p> <p><strong>Bifidobacterium Longum:</strong></p> <p>Bifidobacterium Longum is a probiotic that supports the body&rsquo;s health, wellness and detoxification processes. It has glucose-regulating effects and helps refresh and rejuvenate the good bacteria in the gut while preventing the accumulation of toxic candida.</p> <p><strong>Bifidobacterium Breve:</strong></p> <p>According to traditional medicine, bifidobacterium Breve can treat constipation and diarrhea. It helps eliminate abdominal discomfort caused by digestive disorders and improves stool frequency, especially in children with constipation. Bifidobacterium Breve decreases abdominal fat, improves respiratory health, and reduces the risk of some types of cancer.</p> <p><strong>MCT Oil:</strong></p> <p>he MCT Oil generally aids in increasing energy levels, controlling type 2 diabetes, and lowering blood sugar levels to reduce the excess fat deposits. It might help lessen the visible scars on your body and maintain a moderate blood sugar level.</p> <h2 style="text-align: center;"><a href="https://www.healthsupplement24x7.com/get-glucodyn"><span style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! 
LIMITED TIME OFFER</span></a></h2> <div class="elementor-container elementor-column-gap-default"> <div class="elementor-column elementor-col-100 elementor-top-column elementor-element elementor-element-ce6fa9b" data-element_type="column" data-id="ce6fa9b"> <div class="elementor-widget-wrap elementor-element-populated"> <div class="elementor-element elementor-element-b15bca4 elementor-widget elementor-widget-heading" data-element_type="widget" data-id="b15bca4" data-widget_type="heading.default"> <div class="elementor-widget-container"> <div class="separator" style="clear: both; text-align: center;"><a style="margin-left: 1em; margin-right: 1em;" href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank"><img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgS8soJTFLH2yyaxpSvcThQdumIhlJKiXF4N3ZrX-fO13fL1LcK2yjNVg1RTF3OBhjRrlsgu4Dl3qeDkNerAz_wFLF_J9kTj4Ay3EgTJcSOPNL8VY7g2J0TVfAbTT6L6_2P9mr83q3Y2Jx_4UlApjhQMoYJyaJy7hm49AJOR3GdVSAFjXkRnFE6fSn_8nA/w640-h372/Glucodyn%206.jpg" alt="" width="640" height="372" border="0" data-original-height="581" data-original-width="999" /></a></div> <h2 class="elementor-heading-title elementor-size-default">Benefits Of Glucodyn Supplement</h2> </div> </div> </div> </div> </div> <div class="elementor-container elementor-column-gap-default"> <div class="elementor-column elementor-col-100 elementor-top-column elementor-element elementor-element-b8fb747" data-element_type="column" data-id="b8fb747"> <div class="elementor-widget-wrap elementor-element-populated"> <div class="elementor-element elementor-element-b147bb0 elementor-widget elementor-widget-text-editor" data-element_type="widget" data-id="b147bb0" data-widget_type="text-editor.default"> <div class="elementor-widget-container"> <div> <div> <div> <p><a href="https://www.facebook.com/groups/glucodyn">Glucodyn</a> Blood Sugar supplement is still new in the market, still it has received thousands of positive reviews from consumers who have had a chance to use it. Many are satisfied with its results, with some saying that they noticed a change in less than two weeks.</p> <p><strong>Enhance metabolism</strong></p> <p>This blood sugar supplement aids in optimizing blood sugar levels by supporting metabolism. It accelerates fat burning by eliminating harmful gut bacteria.</p> <p><strong>Maintain balanced blood sugar</strong></p> <p><a href="https://medium.com/@craigstarc/glucodyn-reviews-a-potent-nutrient-rich-blood-sugar-support-formula-01bd44d700f2">Glucodyn</a> nourishes and revitalizes beneficial gut bacteria, enhancing glucose metabolism. It aids the body in restoring its natural ability to regulate blood sugar levels effectively.</p> <p><strong>Reduce hunger and cravings</strong></p> <p>The blood sugar support formula nurtures aging cells, reducing hunger pangs and unnecessary food cravings.</p> <p><strong>Target the root cause of diabetes</strong></p> <p>According to <a href="https://grabcad.com/library/glucodyn-new-2024-does-it-really-works-or-scam-1">Glucodyn</a>'s creators, the primary cause of type 2 diabetes stems from the proliferation of toxic candida in gut flora. 
Glucodyn eradicates toxic candida while promoting the health of beneficial gut bacteria.</p> <p><strong>Improve digestion</strong></p> <p><a href="https://pdfhost.io/v/gOrXYlzhFE_Glucodyn_CRITCAL_NEWS_2024_SCAM_Exposed_By_LEGIT_Customers_Know_This_First">Glucodyn</a> supports blood sugar levels and enhances nutrient absorption from food, lowering the likelihood of digestive issues like constipation, bloating, and gas.</p> <h2>Is Glucodyn Safe?</h2> <p>Glucodyn supplement contains 100% natural and safe ingredients. It is therefore completely safe, effective, and natural. Glucodyn is used daily by thousands of people.There have been no reported side effects. Glucodyn are made in the USA at our FDA-approved, GMP-certified facility.We adhere to the highest standards. It is 100% natural, vegetarian, and non-GMO. Before using, consult your doctor if you have any medical conditions.</p> <h2 style="text-align: center;"><a href="https://www.healthsupplement24x7.com/get-glucodyn"><span style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! LIMITED TIME OFFER</span></a></h2> <h2>Where can I buy Glucodyn?</h2> <p>If you are thinking about Amazon or eBay, it is not available there. But you can buy Glucodyn from the brand&rsquo;s official website, which goes by the same name. It is very easy to order from their website, and every purchase is a one-time purchase.</p> <h2 style="background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; padding: 0px; text-align: center; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; word-spacing: 0px;" data-original-attrs="{&quot;style&quot;:&quot;-webkit-text-stroke-width: 0px; orphans: 2; widows: 2;&quot;}"><span style="background-color: red; box-sizing: border-box;" data-keep-original-tag="false" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><span style="box-sizing: border-box; color: #ffd966;" data-keep-original-tag="false" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><a style="margin-left: 1em; margin-right: 1em;" href="https://www.healthsupplement24x7.com/get-glucodyn" target="_blank" data-original-attrs="{&quot;data-original-href&quot;:&quot;https://www.healthsupplement24x7.com/get-glucodyn&quot;,&quot;target&quot;:&quot;_blank&quot;}"><img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEirf94LBy6fUQG_keJtu4EfNrdwEvBeUqwCdFeREoOplqmT57gE34QZTyJh9tMDxksZuywPGktbcCJAgznzyiaqz1ihG12wDLUZRPPb_k9Mf6Ul17-pwCBha0aLmsO29-FT3eLcvRkASJ9qCupIktipA9lQbIrfhCouzhM19GeKpcbh_rwOeDTAqpow6Xg/w640-h376/Screenshot%20(1355).png" alt="" width="640" height="376" border="0" data-original-height="678" data-original-width="1156" /></a></span></strong></span></h2> <h3>How Much Does Glucodyn Cost?</h3> <p>You would be surprised to learn something that can give a blooming well-being cost as little as $49! Here&rsquo;s the Glucodyn price list:</p> <p>1 Bottle (30-Day Supply): $69 per bottle.</p> <p>6 Bottles (180 Day Supply): $49 per bottle. The total is $294. FREE U.S. SHIPPING.</p> <p>3 Bottles (90-Day Supply): $59 per bottle. The total is $177. 
FREE U.S. SHIPPING.</p> <h2>Glucodyn Reviews: The Final Verdict</h2> <p>The truth about <a href="https://devfolio.co/@glucodynpills">Glucodyn</a> is that it is an excellent product for people who have diabetes because it helps you maintain your glucose level naturally. There are several brands of blood sugar supplements, but you&rsquo;ll love the way Glucodyn works. It is not only because of the folks who tried it and call it their revelation of their lives. But it is because Glucodyn is proven to work in clinical studies. This product can be taken either alone or with other medications. It&rsquo;s easy to use and easy for you. It is also available at a very low price. Plus, you are protected by Glucodyn&rsquo;s 60-day money-back guarantee.</p> <h3>READ MORE OFFICIAL WEBSITE:</h3> <p><a href="https://glucodyn.clubeo.com/page/glucodyn-reviews-are-blood-sugar-disease-reversible-is-glucodyn-scam-or-legit.html">https://glucodyn.clubeo.com/page/glucodyn-reviews-are-blood-sugar-disease-reversible-is-glucodyn-scam-or-legit.html</a></p> <p><a href="https://groups.google.com/a/chromium.org/g/telemetry/c/aenZf4hmVUE">https://groups.google.com/a/chromium.org/g/telemetry/c/aenZf4hmVUE</a></p> <p><a href="https://sites.google.com/view/glucodyn-glucodyn/home">https://sites.google.com/view/glucodyn-glucodyn/home</a></p> <p><a href="https://groups.google.com/a/chromium.org/g/chromium-reviews/c/xzEGplkTAwQ">https://groups.google.com/a/chromium.org/g/chromium-reviews/c/xzEGplkTAwQ</a></p> <p><a href="https://sites.google.com/view/glucodyn-blood-sugar-formula/home">https://sites.google.com/view/glucodyn-blood-sugar-formula/home</a></p> <p><a href="https://glucodyn.clubeo.com/calendar/2023/12/23/glucodyn-things-you-need-to-know-about-shocking-price-where-to-buy?_ga=2.157531130.1317085844.1703305804-2054333221.1703305800">https://glucodyn.clubeo.com/calendar/2023/12/23/glucodyn-things-you-need-to-know-about-shocking-price-where-to-buy</a></p> <p><a href="https://groups.google.com/g/glucodyn-official-wesbite/c/ASTqsOeqi3Q">https://groups.google.com/g/glucodyn-official-wesbite/c/ASTqsOeqi3Q</a></p> <p><a href="https://gamma.app/public/Glucodyn-Reviews-z21hgbrcrmq3bc5?mode=doc">https://gamma.app/public/Glucodyn-Reviews-z21hgbrcrmq3bc5?mode=doc</a></p> <p><a href="https://www.townscript.com/e/glucodyn-new-2024-does-it-really-works-or-scam-000214">https://www.townscript.com/e/glucodyn-new-2024-does-it-really-works-or-scam-000214</a></p> <p><a href="https://www.facebook.com/groups/glucodyn">https://www.facebook.com/groups/glucodyn</a></p> <p><a href="https://medium.com/@craigstarc/glucodyn-reviews-a-potent-nutrient-rich-blood-sugar-support-formula-01bd44d700f2">https://medium.com/@craigstarc/glucodyn-reviews-a-potent-nutrient-rich-blood-sugar-support-formula-01bd44d700f2</a></p> <p><a href="https://grabcad.com/library/glucodyn-new-2024-does-it-really-works-or-scam-1">https://grabcad.com/library/glucodyn-new-2024-does-it-really-works-or-scam-1</a></p> <p><a href="https://pdfhost.io/v/gOrXYlzhFE_Glucodyn_CRITCAL_NEWS_2024_SCAM_Exposed_By_LEGIT_Customers_Know_This_First">https://pdfhost.io/v/gOrXYlzhFE_Glucodyn_CRITCAL_NEWS_2024_SCAM_Exposed_By_LEGIT_Customers_Know_This_First</a></p> <p><a href="https://devfolio.co/@glucodynpills">https://devfolio.co/@glucodynpills</a></p> </div> </div> </div> </div> </div> </div> </div> </div>
glucodyn/Glucodyn
[ "region:us" ]
2023-12-23T10:20:42+00:00
{}
2023-12-23T10:20:58+00:00
[]
[]
TAGS #region-us
<h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="URL target="_blank"><span style="background-color: red; box-sizing: border-box;"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="box-sizing: border-box; color: #ffd966;">Glucodyn &ndash; Official Website Link &ndash; Click Here</span></strong></span></a></h2> <p style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: #333333; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 14px; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: 400; letter-spacing: normal; margin: 0px 0px 10px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="box-sizing: border-box; color: magenta;"> Where to Get Bottle Online -</span> <a href="URL style="background-color: white; box-sizing: border-box; color: red;">URL style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: green;"> Product Name -</span> GLUCODYN!<br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #993300;"> Side Effects -</span>&nbsp;<span style="box-sizing: border-box; color: navy;">No Major Side Effects</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #993366;"> Category -</span>&nbsp;<span style="box-sizing: border-box; color: #333333;">Health (BLOOD SUGAR SUPPORT)</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: maroon;"> Results -</span>&nbsp;<span style="box-sizing: border-box; color: #00ccff;">In 1-2 Months</span><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: red;"> Availability &ndash;</span>&nbsp;<a href="URL style="background-color: transparent; box-sizing: border-box; color: black; text-decoration: none;"><span style="box-sizing: border-box; color: #ff6600;">Online</span></span></a><br style="box-sizing: border-box;" /><span style="box-sizing: border-box; color: #333300;"> Rating: -</span>&nbsp;<span style="box-sizing: border-box; color: red;">5.0/5.0</span>&nbsp;⭐⭐⭐⭐⭐</strong></p> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="URL target="_blank"><strong style="box-sizing: 
border-box; font-style: normal; font-weight: bold;"><span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span></strong></a></h2> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="URL target="_blank"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span></strong></a></h2> <h2 style="-webkit-text-stroke-width: 0px; background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; orphans: 2; padding: 0px; text-align: start; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px;"><a href="URL target="_blank"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;"><span style="background-color: red; box-sizing: border-box;"><span style="box-sizing: border-box; color: #ffd966;">Click Here To Visit &ndash; &ldquo;OFFICIAL WEBSITE</span></span><span style="background-color: red; box-sizing: border-box; color: #ffcc00;">&rdquo;</span></strong></a></h2> <p><strong>Glucodyn Reviews:</strong> Most diabetics have a hard time changing because they have had diabetes for a long time. To make it easier for them, the <a href="URL blood sugar supplement contains probiotics, MCT oil, and other unique ingredients in a capsule form that will help them change their metabolism.</p> <p><a href="URL does a great job of controlling your blood sugar level. It is designed to lower your A1C level, and weight loss is also one of the many benefits. By taking the Glucodyn capsule, your body can easily convert glucose metabolism into energy and overcome the damage caused by long-term insulin resistance. Glucodyn has nine high-quality detoxifying nutrients in an easy-to-digest form.</p> <div class="separator" style="clear: both; text-align: center;"><a style="margin-left: 1em; margin-right: 1em;" href="URL target="_blank"><img src="URL alt="" width="640" height="512" border="0" data-original-height="600" data-original-width="751" /></a></div> <h2>What is Glucodyn?</h2> <p><a href="URL is a dietary supplement that reduces imbalanced blood sugar levels. 
The revolutionary supplement has detoxifying nutrients that help flush out toxins from the body, thus lowering blood sugar levels.</p> <p>The blood sugar support targets the root cause of abnormal blood sugar levels in the body. It has superior ingredients in the right proportion that are scientifically proven to reduce type 2 diabetes. The natural ingredients in Glucodyn increase energy levels and prevent disruption of metabolic function.</p> <p>Lack of nutrients in the body causes an attack on the metabolism balance and collapses hormonal growth. <a href="URL prevents all these from happening. It restores and repairs damaged arteries, therefore, improving your metabolism.</p> <p>Consuming <a href="URL supplements daily reduces insulin resistance and improves insulin production in the pancreas. The formula works effectively without following a strict diet or exercise routine.</p> <h2 style="text-align: center;"><a href="URL style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! LIMITED TIME OFFER</span></a></h2> <h2>How Does Glucodyn Works?</h2> <p><a href="URL is the best solution for type 2 diabetes. It addresses the root cause of imbalanced blood sugar levels in the body. The powerful blend of ingredients in the blood sugar support formula replenishes and rejuvenates the good bacteria in the gut and flushes out toxins.</p> <p>It clears the gastrointestinal tract of free radicals that cause oxidative damage. Glucodyn creates a protective shield against candida intoxication by improving the production of antibodies. The formula helps restore your poor glucose metabolism damaged by toxic candida.</p> <p>Flush out toxic candida <a href="URL is beneficial in restoring good gut bacteria health. It helps eliminate toxic candida, the primary cause of type 2 diabetes. Lactobacillus Acidophilus in Glucodyn improves the production of antibodies that fight Candida albicans antigens.</p> <h2>Glucodyn Supplement Ingredients</h2> <p>Glucodyn dietary supplement contains potent ingredients that improve your metabolism and eliminate harmful substances that inhibit metabolic processes in the body. Each element is backed by scientific research and has outstanding health benefits. Here are the active components of Glucodyn:</p> <p><strong>Lactobacillus Acidophilus:</strong></p> <p>Lactobacillus Acidophilus helps remove toxic candida that causes a spike in glucose levels. It supports the production of antibodies that help fight candida albicana antigens. The ingredient helps as a protective shield against future candida attacks while improving metabolism.</p> <p><strong>Lactobacillus Rhamnosus:</strong></p> <p>The component supports insulin sensitivity, reduces oxidative stress, and improves glycemic control in people with diabetes. Lactobacillus rhamnosus collects waste in the gastrointestinal tract and flushes it away, boosting the gut microbiota. Much research shows that</p> <p><strong>Bifidobacterium Longum:</strong></p> <p>Bifidobacterium Longum is a probiotic that supports the body&rsquo;s health, wellness and detoxification processes. It has glucose-regulating effects and helps refresh and rejuvenate the good bacteria in the gut while preventing the accumulation of toxic candida.</p> <p><strong>Bifidobacterium Breve:</strong></p> <p>According to traditional medicine, bifidobacterium Breve can treat constipation and diarrhea. It helps eliminate abdominal discomfort caused by digestive disorders and improves stool frequency, especially in children with constipation. 
Bifidobacterium Breve decreases abdominal fat, improves respiratory health, and reduces the risk of some types of cancer.</p> <p><strong>MCT Oil:</strong></p> <p>he MCT Oil generally aids in increasing energy levels, controlling type 2 diabetes, and lowering blood sugar levels to reduce the excess fat deposits. It might help lessen the visible scars on your body and maintain a moderate blood sugar level.</p> <h2 style="text-align: center;"><a href="URL style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! LIMITED TIME OFFER</span></a></h2> <div class="elementor-container elementor-column-gap-default"> <div class="elementor-column elementor-col-100 elementor-top-column elementor-element elementor-element-ce6fa9b" data-element_type="column" data-id="ce6fa9b"> <div class="elementor-widget-wrap elementor-element-populated"> <div class="elementor-element elementor-element-b15bca4 elementor-widget elementor-widget-heading" data-element_type="widget" data-id="b15bca4" data-widget_type="heading.default"> <div class="elementor-widget-container"> <div class="separator" style="clear: both; text-align: center;"><a style="margin-left: 1em; margin-right: 1em;" href="URL target="_blank"><img src="URL alt="" width="640" height="372" border="0" data-original-height="581" data-original-width="999" /></a></div> <h2 class="elementor-heading-title elementor-size-default">Benefits Of Glucodyn Supplement</h2> </div> </div> </div> </div> </div> <div class="elementor-container elementor-column-gap-default"> <div class="elementor-column elementor-col-100 elementor-top-column elementor-element elementor-element-b8fb747" data-element_type="column" data-id="b8fb747"> <div class="elementor-widget-wrap elementor-element-populated"> <div class="elementor-element elementor-element-b147bb0 elementor-widget elementor-widget-text-editor" data-element_type="widget" data-id="b147bb0" data-widget_type="text-editor.default"> <div class="elementor-widget-container"> <div> <div> <div> <p><a href="URL Blood Sugar supplement is still new in the market, still it has received thousands of positive reviews from consumers who have had a chance to use it. Many are satisfied with its results, with some saying that they noticed a change in less than two weeks.</p> <p><strong>Enhance metabolism</strong></p> <p>This blood sugar supplement aids in optimizing blood sugar levels by supporting metabolism. It accelerates fat burning by eliminating harmful gut bacteria.</p> <p><strong>Maintain balanced blood sugar</strong></p> <p><a href="URL nourishes and revitalizes beneficial gut bacteria, enhancing glucose metabolism. It aids the body in restoring its natural ability to regulate blood sugar levels effectively.</p> <p><strong>Reduce hunger and cravings</strong></p> <p>The blood sugar support formula nurtures aging cells, reducing hunger pangs and unnecessary food cravings.</p> <p><strong>Target the root cause of diabetes</strong></p> <p>According to <a href="URL creators, the primary cause of type 2 diabetes stems from the proliferation of toxic candida in gut flora. Glucodyn eradicates toxic candida while promoting the health of beneficial gut bacteria.</p> <p><strong>Improve digestion</strong></p> <p><a href="URL supports blood sugar levels and enhances nutrient absorption from food, lowering the likelihood of digestive issues like constipation, bloating, and gas.</p> <h2>Is Glucodyn Safe?</h2> <p>Glucodyn supplement contains 100% natural and safe ingredients. 
It is therefore completely safe, effective, and natural. Glucodyn is used daily by thousands of people.There have been no reported side effects. Glucodyn are made in the USA at our FDA-approved, GMP-certified facility.We adhere to the highest standards. It is 100% natural, vegetarian, and non-GMO. Before using, consult your doctor if you have any medical conditions.</p> <h2 style="text-align: center;"><a href="URL style="color: red;">CHRISTMAS OFFER IS LIVE -- GO AND GRAB YOUR GLUCODYN ON SPECIAL PRICE! LIMITED TIME OFFER</span></a></h2> <h2>Where can I buy Glucodyn?</h2> <p>If you are thinking about Amazon or eBay, it is not available there. But you can buy Glucodyn from the brand&rsquo;s official website, which goes by the same name. It is very easy to order from their website, and every purchase is a one-time purchase.</p> <h2 style="background-color: white; box-sizing: border-box; color: black; font-family: Roboto, Helvetica, Arial, sans-serif; font-size: 1.5em; font-style: normal; font-variant-caps: normal; font-variant-ligatures: normal; font-weight: bold; letter-spacing: normal; line-height: 1.1; margin: 10px 0px; padding: 0px; text-align: center; text-decoration-color: initial; text-decoration-style: initial; text-decoration-thickness: initial; text-indent: 0px; text-transform: none; white-space: normal; word-spacing: 0px;" data-original-attrs="{&quot;style&quot;:&quot;-webkit-text-stroke-width: 0px; orphans: 2; widows: 2;&quot;}"><span style="background-color: red; box-sizing: border-box;" data-keep-original-tag="false" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><strong style="box-sizing: border-box; font-style: normal; font-weight: bold;" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><span style="box-sizing: border-box; color: #ffd966;" data-keep-original-tag="false" data-original-attrs="{&quot;style&quot;:&quot;&quot;}"><a style="margin-left: 1em; margin-right: 1em;" href="URL target="_blank" data-original-attrs="{&quot;data-original-href&quot;:&quot;URL src="URL alt="" width="640" height="376" border="0" data-original-height="678" data-original-width="1156" /></a></span></strong></span></h2> <h3>How Much Does Glucodyn Cost?</h3> <p>You would be surprised to learn something that can give a blooming well-being cost as little as $49! Here&rsquo;s the Glucodyn price list:</p> <p>1 Bottle (30-Day Supply): $69 per bottle.</p> <p>6 Bottles (180 Day Supply): $49 per bottle. The total is $294. FREE U.S. SHIPPING.</p> <p>3 Bottles (90-Day Supply): $59 per bottle. The total is $177. FREE U.S. SHIPPING.</p> <h2>Glucodyn Reviews: The Final Verdict</h2> <p>The truth about <a href="URL is that it is an excellent product for people who have diabetes because it helps you maintain your glucose level naturally. There are several brands of blood sugar supplements, but you&rsquo;ll love the way Glucodyn works. It is not only because of the folks who tried it and call it their revelation of their lives. But it is because Glucodyn is proven to work in clinical studies. This product can be taken either alone or with other medications. It&rsquo;s easy to use and easy for you. It is also available at a very low price. 
Plus, you are protected by Glucodyn&rsquo;s 60-day money-back guarantee.</p> <h3>READ MORE OFFICIAL WEBSITE:</h3> <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL <p><a href="URL/URL </div> </div> </div> </div> </div> </div> </div> </div>
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
b141d97a69e36accc264e2e48bf667147e8c14de
# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

[More Information Needed]

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->

[More Information Needed]

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

[More Information Needed]

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

[More Information Needed]

### Annotations [optional]

<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->

#### Annotation process

<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->

[More Information Needed]

#### Who are the annotators?

<!-- This section describes the people or systems who created the annotations. -->

[More Information Needed]

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.

## Citation [optional]

<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Dataset Card Authors [optional]

[More Information Needed]

## Dataset Card Contact

[More Information Needed]
ebartan/bodrum_belediyesi_fen_isleri_netigma_dataset
[ "region:us" ]
2023-12-23T10:42:31+00:00
{}
2023-12-23T10:47:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
d377b6efccb0c59bda0c8208335ec5f63f5acc63
# Dataset Card for "nli-zh-tw" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Mike0307/nli-zh-tw
[ "region:us" ]
2023-12-23T10:43:59+00:00
{"dataset_info": {"features": [{"name": "text1", "dtype": "string"}, {"name": "text2", "dtype": "string"}, {"name": "label", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 236095492, "num_examples": 393442}, {"name": "test", "num_bytes": 29491031, "num_examples": 49226}, {"name": "validate", "num_bytes": 29404841, "num_examples": 49286}], "download_size": 215294103, "dataset_size": 294991364}}
2023-12-23T10:54:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "nli-zh-tw" More Information needed
[ "# Dataset Card for \"nli-zh-tw\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"nli-zh-tw\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"nli-zh-tw\"\n\nMore Information needed" ]
196084c40e3b8dc8a4d9985624b0a54069ff76e5
# ELSA - Multimedia use case

![image/gif](https://cdn-uploads.huggingface.co/production/uploads/6380ccd084022715e0d49d4e/6eRNxY1AFfaksVu8oTk8v.gif)

**ELSA Multimedia is a large collection of Deep Fake images, generated using diffusion models**

### Dataset Summary

This dataset was developed as part of the EU project ELSA, specifically for the Multimedia use case.
Official webpage: https://benchmarks.elsa-ai.eu/
This dataset supports the development of effective solutions for detecting and mitigating the spread of deep fake images in multimedia content. Deep fake images, which are highly realistic and deceptive manipulations, pose significant risks to privacy, security, and trust in digital media. This dataset can be used to train robust and accurate models that can identify and flag instances of deep fake images.

### ELSA versions

| Name | Description | Link |
| ------------- | ------------- | ---------------------|
| ELSA1M_track1 | Dataset of 1M images generated using a diffusion model | https://huggingface.co/datasets/elsaEU/ELSA1M_track1 |
| ELSA10M_track1 | Dataset of 10M images generated using four different diffusion models for each caption, with multiple image compression formats and multiple aspect ratios | https://huggingface.co/datasets/elsaEU/ELSA_D3 |
| ELSA500k_track2 | Dataset of 500k images generated using a diffusion model with diffusion attentive attribution maps [1] | https://huggingface.co/datasets/elsaEU/ELSA500k_track2 |

```python
from datasets import load_dataset
elsa_data = load_dataset("elsaEU/ELSA_D3", split="train", streaming=True)
```

Using <a href="https://huggingface.co/docs/datasets/stream">streaming=True</a> lets you work with the dataset without downloading it.

## Dataset Structure

Each parquet file contains nearly 1k images and a JSON file with metadata.

The metadata fields for generated images are:
- ID: LAION image ID
- original_prompt: LAION prompt
- positive_prompt: positive prompt used for image generation
- negative_prompt: negative prompt used for image generation
- url: URL of the real image associated with the same prompt
- width: width of the generated image
- height: height of the generated image
- num_inference_steps: diffusion steps of the generator
- filepath: path of the generated image
- model_gen0: Generator 0 name
- model_gen1: Generator 1 name
- model_gen2: Generator 2 name
- model_gen3: Generator 3 name
- image_gen0: image generated with generator 0
- image_gen1: image generated with generator 1
- image_gen2: image generated with generator 2
- image_gen3: image generated with generator 3
- aspect_ratio: aspect ratio of the generated image

### Dataset Curators
- Leonardo Labs ([email protected])
- UNIMORE (https://aimagelab.ing.unimore.it/imagelab/)
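To make the streaming workflow above concrete, here is a minimal sketch of iterating the streamed split and reading the per-generator fields; the field names follow the feature schema declared in this repo's metadata (width, height, inference steps, model, and image are suffixed per generator, `_gen0` … `_gen3`).

```python
from datasets import load_dataset

# Stream the split so the multi-terabyte parquet shards are fetched lazily.
elsa_data = load_dataset("elsaEU/ELSA_D3", split="train", streaming=True)

# Each record carries one LAION prompt plus four generated images.
sample = next(iter(elsa_data))
print(sample["id"], "-", sample["original_prompt"])
for i in range(4):
    w, h = sample[f"width_gen{i}"], sample[f"height_gen{i}"]
    print(sample[f"model_gen{i}"], f"{w}x{h}", sample[f"num_inference_steps_gen{i}"])
    # sample[f"image_gen{i}"] decodes to a PIL.Image when accessed.
```

Since the streamed split is an `IterableDataset`, any `filter` or `map` you attach is applied on the fly rather than materialising the data on disk.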
elsaEU/ELSA_D3
[ "region:us" ]
2023-12-23T10:49:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": ["data/train-*", "data/val-*"]}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "original_prompt", "dtype": "string"}, {"name": "positive_prompt", "dtype": "string"}, {"name": "negative_prompt", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "model_gen0", "dtype": "string"}, {"name": "model_gen1", "dtype": "string"}, {"name": "model_gen2", "dtype": "string"}, {"name": "model_gen3", "dtype": "string"}, {"name": "width_gen0", "dtype": "int64"}, {"name": "width_gen1", "dtype": "int64"}, {"name": "width_gen2", "dtype": "int64"}, {"name": "width_gen3", "dtype": "int64"}, {"name": "height_gen0", "dtype": "int64"}, {"name": "height_gen1", "dtype": "int64"}, {"name": "height_gen2", "dtype": "int64"}, {"name": "height_gen3", "dtype": "int64"}, {"name": "num_inference_steps_gen0", "dtype": "int64"}, {"name": "num_inference_steps_gen1", "dtype": "int64"}, {"name": "num_inference_steps_gen2", "dtype": "int64"}, {"name": "num_inference_steps_gen3", "dtype": "int64"}, {"name": "filepath_gen0", "dtype": "string"}, {"name": "filepath_gen1", "dtype": "string"}, {"name": "filepath_gen2", "dtype": "string"}, {"name": "filepath_gen3", "dtype": "string"}, {"name": "image_gen0", "dtype": "image"}, {"name": "image_gen1", "dtype": "image"}, {"name": "image_gen2", "dtype": "image"}, {"name": "image_gen3", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2626848010531.5, "num_examples": 2306629}, {"name": "validation", "num_bytes": 5318900038.0, "num_examples": 4800}], "download_size": 2568003790242, "dataset_size": 2632166910569.5}}
2024-01-15T09:39:14+00:00
[]
[]
TAGS #region-us
ELSA - Multimedia use case
==========================

!image/gif

ELSA Multimedia is a large collection of Deep Fake images, generated using diffusion models

### Dataset Summary

This dataset was developed as part of the EU project ELSA, specifically for the Multimedia use case.
Official webpage: URL
This dataset supports the development of effective solutions for detecting and mitigating the spread of deep fake images in multimedia content. Deep fake images, which are highly realistic and deceptive manipulations, pose significant risks to privacy, security, and trust in digital media. This dataset can be used to train robust and accurate models that can identify and flag instances of deep fake images.

### ELSA versions

Name: ELSA1M\_track1, Description: Dataset of 1M images generated using a diffusion model, Link: URL
Name: ELSA10M\_track1, Description: Dataset of 10M images generated using four different diffusion models for each caption, with multiple image compression formats and multiple aspect ratios, Link: URL
Name: ELSA500k\_track2, Description: Dataset of 500k images generated using a diffusion model with diffusion attentive attribution maps [1], Link: URL

Using <a href="URL lets you work with the dataset without downloading it.

Dataset Structure
-----------------

Each parquet file contains nearly 1k images and a JSON file with metadata.

The metadata fields for generated images are:

* ID: LAION image ID
* original\_prompt: LAION prompt
* positive\_prompt: positive prompt used for image generation
* negative\_prompt: negative prompt used for image generation
* url: URL of the real image associated with the same prompt
* width: width of the generated image
* height: height of the generated image
* num\_inference\_steps: diffusion steps of the generator
* filepath: path of the generated image
* model\_gen0: Generator 0 name
* model\_gen1: Generator 1 name
* model\_gen2: Generator 2 name
* model\_gen3: Generator 3 name
* image\_gen0: image generated with generator 0
* image\_gen1: image generated with generator 1
* image\_gen2: image generated with generator 2
* image\_gen3: image generated with generator 3
* aspect\_ratio: aspect ratio of the generated image

### Dataset Curators

* Leonardo Labs (URL@URL)
* UNIMORE (URL
[ "### Dataset Summary\n\n\nThis dataset was developed as part of the EU project ELSA. Specifically for the Multimedia use-case.\nOfficial webpage: URL\nThis dataset aims to develop effective solutions for detecting and mitigating the spread of deep fake images in multimedia content. Deep fake images, which are highly realistic and deceptive manipulations, pose significant risks to privacy, security, and trust in digital media. This dataset can be used to train robust and accurate models that can identify and flag instances of deep fake images.", "### ELSA versions\n\n\nName: ELSA1M\\_track1, Description: Dataset of 1M images generated using diffusion model, Link: URL\nName: ELSA10M\\_track1, Description: Dataset of 10M images generated using four different diffusion models for each caption, multiple image compression formats, multiple aspect ration, Link: URL\nName: ELSA500k\\_track2, Description: Dataset of 500k images generated using diffusion model with diffusion attentive attribution maps [1], Link: URL\n\n\nUsing <a href=\"URL lets you work with the dataset without downloading it.\n\n\nDataset Structure\n-----------------\n\n\nEach parquet file contains nearly 1k images and a JSON file with metadata.\n\n\nThe Metadata for generated images are:\n\n\n* ID: Laion image ID\n* original\\_prompt: Laion Prompt\n* positive\\_prompt: positive prompt used for image generation\n* negative\\_prompt: negative prompt used for image generation\n* url: Url of the real image associated with the same prompt\n* width: width generated image\n* height: height generated image\n* num\\_inference\\_steps: diffusion steps of the generator\n* filepath: path of the generated image\n* model\\_gen0: Generator 0 name\n* model\\_gen1: Generator 1 name\n* model\\_gen2: Generator 2 name\n* model\\_gen3: Generator 3 name\n* image\\_gen0: image generated with generator 0\n* image\\_gen1: image generated with generator 1\n* image\\_gen2: image generated with generator 2\n* image\\_gen3: image generated with generator 3\n* aspect\\_ratio: aspect ratio of the generated image", "### Dataset Curators\n\n\n* Leonardo Labs (URL@URL)\n* UNIMORE (URL" ]
[ "TAGS\n#region-us \n", "### Dataset Summary\n\n\nThis dataset was developed as part of the EU project ELSA. Specifically for the Multimedia use-case.\nOfficial webpage: URL\nThis dataset aims to develop effective solutions for detecting and mitigating the spread of deep fake images in multimedia content. Deep fake images, which are highly realistic and deceptive manipulations, pose significant risks to privacy, security, and trust in digital media. This dataset can be used to train robust and accurate models that can identify and flag instances of deep fake images.", "### ELSA versions\n\n\nName: ELSA1M\\_track1, Description: Dataset of 1M images generated using diffusion model, Link: URL\nName: ELSA10M\\_track1, Description: Dataset of 10M images generated using four different diffusion models for each caption, multiple image compression formats, multiple aspect ration, Link: URL\nName: ELSA500k\\_track2, Description: Dataset of 500k images generated using diffusion model with diffusion attentive attribution maps [1], Link: URL\n\n\nUsing <a href=\"URL lets you work with the dataset without downloading it.\n\n\nDataset Structure\n-----------------\n\n\nEach parquet file contains nearly 1k images and a JSON file with metadata.\n\n\nThe Metadata for generated images are:\n\n\n* ID: Laion image ID\n* original\\_prompt: Laion Prompt\n* positive\\_prompt: positive prompt used for image generation\n* negative\\_prompt: negative prompt used for image generation\n* url: Url of the real image associated with the same prompt\n* width: width generated image\n* height: height generated image\n* num\\_inference\\_steps: diffusion steps of the generator\n* filepath: path of the generated image\n* model\\_gen0: Generator 0 name\n* model\\_gen1: Generator 1 name\n* model\\_gen2: Generator 2 name\n* model\\_gen3: Generator 3 name\n* image\\_gen0: image generated with generator 0\n* image\\_gen1: image generated with generator 1\n* image\\_gen2: image generated with generator 2\n* image\\_gen3: image generated with generator 3\n* aspect\\_ratio: aspect ratio of the generated image", "### Dataset Curators\n\n\n* Leonardo Labs (URL@URL)\n* UNIMORE (URL" ]
[ 6, 114, 387, 21 ]
[ "passage: TAGS\n#region-us \n### Dataset Summary\n\n\nThis dataset was developed as part of the EU project ELSA. Specifically for the Multimedia use-case.\nOfficial webpage: URL\nThis dataset aims to develop effective solutions for detecting and mitigating the spread of deep fake images in multimedia content. Deep fake images, which are highly realistic and deceptive manipulations, pose significant risks to privacy, security, and trust in digital media. This dataset can be used to train robust and accurate models that can identify and flag instances of deep fake images.### ELSA versions\n\n\nName: ELSA1M\\_track1, Description: Dataset of 1M images generated using diffusion model, Link: URL\nName: ELSA10M\\_track1, Description: Dataset of 10M images generated using four different diffusion models for each caption, multiple image compression formats, multiple aspect ration, Link: URL\nName: ELSA500k\\_track2, Description: Dataset of 500k images generated using diffusion model with diffusion attentive attribution maps [1], Link: URL\n\n\nUsing <a href=\"URL lets you work with the dataset without downloading it.\n\n\nDataset Structure\n-----------------\n\n\nEach parquet file contains nearly 1k images and a JSON file with metadata.\n\n\nThe Metadata for generated images are:\n\n\n* ID: Laion image ID\n* original\\_prompt: Laion Prompt\n* positive\\_prompt: positive prompt used for image generation\n* negative\\_prompt: negative prompt used for image generation\n* url: Url of the real image associated with the same prompt\n* width: width generated image\n* height: height generated image\n* num\\_inference\\_steps: diffusion steps of the generator\n* filepath: path of the generated image\n* model\\_gen0: Generator 0 name\n* model\\_gen1: Generator 1 name\n* model\\_gen2: Generator 2 name\n* model\\_gen3: Generator 3 name\n* image\\_gen0: image generated with generator 0\n* image\\_gen1: image generated with generator 1\n* image\\_gen2: image generated with generator 2\n* image\\_gen3: image generated with generator 3\n* aspect\\_ratio: aspect ratio of the generated image" ]
dd409bbc90e2de1ee6b35115e7d4768c74a123bd
Extension/Modification of the original m_lama dataset
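Judging from this repo's metadata, each language code is its own config with a single `test` split, and each example carries an `options` field on top of the original mLAMA fields. A minimal loading sketch, assuming that layout:

```python
from datasets import load_dataset

# One config per language code ("en", "de", "zh", ...), each with only a "test" split.
mlama_en = load_dataset("atutej/m_lama", "en", split="test")

ex = mlama_en[0]
# The cloze template marks the subject/object slots (in the original mLAMA, as [X]/[Y]).
print(ex["template"])
print(ex["sub_label"], "->", ex["obj_label"])
# "options" presumably holds candidate answer strings added by this extension
# (an assumption based on the field name; it is not documented in the card).
print(ex["options"])
```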
atutej/m_lama
[ "region:us" ]
2023-12-23T11:16:50+00:00
{"dataset_info": [{"config_name": "af", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1364986, "num_examples": 7331}], "download_size": 544481, "dataset_size": 1364986}, {"config_name": "ar", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4564504, "num_examples": 19354}], "download_size": 1580143, "dataset_size": 4564504}, {"config_name": "az", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1467465, "num_examples": 7653}], "download_size": 578396, "dataset_size": 1467465}, {"config_name": "be", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2285464, "num_examples": 8853}], "download_size": 714406, "dataset_size": 2285464}, {"config_name": "bg", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3109085, "num_examples": 12461}], "download_size": 1013009, "dataset_size": 3109085}, {"config_name": "bn", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2969863, "num_examples": 8975}], "download_size": 748274, "dataset_size": 2969863}, {"config_name": "ca", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": 
"uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4620850, "num_examples": 24287}], "download_size": 1940588, "dataset_size": 4620850}, {"config_name": "ceb", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1433194, "num_examples": 6769}], "download_size": 524854, "dataset_size": 1433194}, {"config_name": "cs", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2997353, "num_examples": 15848}], "download_size": 1246743, "dataset_size": 2997353}, {"config_name": "cy", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1901684, "num_examples": 9915}], "download_size": 769225, "dataset_size": 1901684}, {"config_name": "da", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3672623, "num_examples": 19636}], "download_size": 1535250, "dataset_size": 3672623}, {"config_name": "de", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6348506, "num_examples": 32548}], "download_size": 2613173, "dataset_size": 6348506}, {"config_name": "el", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", 
"dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3416098, "num_examples": 12854}], "download_size": 1074167, "dataset_size": 3416098}, {"config_name": "en", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 7031572, "num_examples": 37498}], "download_size": 3023574, "dataset_size": 7031572}, {"config_name": "es", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6000790, "num_examples": 31578}], "download_size": 2542929, "dataset_size": 6000790}, {"config_name": "et", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1847160, "num_examples": 9880}], "download_size": 748222, "dataset_size": 1847160}, {"config_name": "eu", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2260887, "num_examples": 11910}], "download_size": 921424, "dataset_size": 2260887}, {"config_name": "fa", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4482869, "num_examples": 18481}], "download_size": 1497801, "dataset_size": 4482869}, {"config_name": "fi", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": 
"language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3575879, "num_examples": 19017}], "download_size": 1477166, "dataset_size": 3575879}, {"config_name": "fr", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6553643, "num_examples": 33872}], "download_size": 2716208, "dataset_size": 6553643}, {"config_name": "ga", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2809813, "num_examples": 13937}], "download_size": 1076939, "dataset_size": 2809813}, {"config_name": "gl", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2062413, "num_examples": 10567}], "download_size": 817987, "dataset_size": 2062413}, {"config_name": "he", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3273282, "num_examples": 14769}], "download_size": 1165490, "dataset_size": 3273282}, {"config_name": "hi", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2750247, "num_examples": 8570}], "download_size": 707213, "dataset_size": 2750247}, {"config_name": "hr", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": 
"string"}], "splits": [{"name": "test", "num_bytes": 1766612, "num_examples": 9322}], "download_size": 714362, "dataset_size": 1766612}, {"config_name": "hu", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3629786, "num_examples": 18850}], "download_size": 1485748, "dataset_size": 3629786}, {"config_name": "hy", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2580835, "num_examples": 10030}], "download_size": 809063, "dataset_size": 2580835}, {"config_name": "id", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2693872, "num_examples": 14183}], "download_size": 1103155, "dataset_size": 2693872}, {"config_name": "it", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 5287655, "num_examples": 27648}], "download_size": 2198936, "dataset_size": 5287655}, {"config_name": "ja", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6105411, "num_examples": 25356}], "download_size": 2091964, "dataset_size": 6105411}, {"config_name": "ka", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2649721, "num_examples": 8099}], "download_size": 647390, 
"dataset_size": 2649721}, {"config_name": "ko", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3526211, "num_examples": 16327}], "download_size": 1309593, "dataset_size": 3526211}, {"config_name": "la", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1581833, "num_examples": 8061}], "download_size": 612760, "dataset_size": 1581833}, {"config_name": "lt", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1835683, "num_examples": 9560}], "download_size": 736354, "dataset_size": 1835683}, {"config_name": "lv", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1649860, "num_examples": 8474}], "download_size": 643807, "dataset_size": 1649860}, {"config_name": "ms", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1768627, "num_examples": 9146}], "download_size": 702211, "dataset_size": 1768627}, {"config_name": "nl", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6221612, "num_examples": 32423}], "download_size": 2597145, "dataset_size": 6221612}, {"config_name": "pl", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", 
"dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4013247, "num_examples": 20727}], "download_size": 1644648, "dataset_size": 4013247}, {"config_name": "pt", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4044269, "num_examples": 21023}], "download_size": 1653658, "dataset_size": 4044269}, {"config_name": "ro", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2523121, "num_examples": 12886}], "download_size": 1007651, "dataset_size": 2523121}, {"config_name": "ru", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6405438, "num_examples": 25335}], "download_size": 2129105, "dataset_size": 6405438}, {"config_name": "sk", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1942547, "num_examples": 10205}], "download_size": 788723, "dataset_size": 1942547}, {"config_name": "sl", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3455705, "num_examples": 18091}], "download_size": 1406987, "dataset_size": 3455705}, {"config_name": "sq", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": 
"sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2404246, "num_examples": 12586}], "download_size": 956395, "dataset_size": 2404246}, {"config_name": "sr", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3104514, "num_examples": 12477}], "download_size": 1027773, "dataset_size": 3104514}, {"config_name": "sv", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4536924, "num_examples": 24240}], "download_size": 1905031, "dataset_size": 4536924}, {"config_name": "ta", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2546658, "num_examples": 7223}], "download_size": 599177, "dataset_size": 2546658}, {"config_name": "th", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3451558, "num_examples": 9786}], "download_size": 851558, "dataset_size": 3451558}, {"config_name": "tr", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2701219, "num_examples": 14209}], "download_size": 1101256, "dataset_size": 2701219}, {"config_name": "uk", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, 
{"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4528716, "num_examples": 18035}], "download_size": 1523846, "dataset_size": 4528716}, {"config_name": "ur", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1774430, "num_examples": 7279}], "download_size": 576108, "dataset_size": 1774430}, {"config_name": "vi", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2331103, "num_examples": 11350}], "download_size": 893519, "dataset_size": 2331103}, {"config_name": "zh", "features": [{"name": "uuid", "dtype": "string"}, {"name": "lineid", "dtype": "uint32"}, {"name": "obj_uri", "dtype": "string"}, {"name": "obj_label", "dtype": "string"}, {"name": "sub_uri", "dtype": "string"}, {"name": "sub_label", "dtype": "string"}, {"name": "template", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "predicate_id", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 4178875, "num_examples": 21449}], "download_size": 1747217, "dataset_size": 4178875}], "configs": [{"config_name": "af", "data_files": [{"split": "test", "path": "af/test-*"}]}, {"config_name": "ar", "data_files": [{"split": "test", "path": "ar/test-*"}]}, {"config_name": "az", "data_files": [{"split": "test", "path": "az/test-*"}]}, {"config_name": "be", "data_files": [{"split": "test", "path": "be/test-*"}]}, {"config_name": "bg", "data_files": [{"split": "test", "path": "bg/test-*"}]}, {"config_name": "bn", "data_files": [{"split": "test", "path": "bn/test-*"}]}, {"config_name": "ca", "data_files": [{"split": "test", "path": "ca/test-*"}]}, {"config_name": "ceb", "data_files": [{"split": "test", "path": "ceb/test-*"}]}, {"config_name": "cs", "data_files": [{"split": "test", "path": "cs/test-*"}]}, {"config_name": "cy", "data_files": [{"split": "test", "path": "cy/test-*"}]}, {"config_name": "da", "data_files": [{"split": "test", "path": "da/test-*"}]}, {"config_name": "de", "data_files": [{"split": "test", "path": "de/test-*"}]}, {"config_name": "el", "data_files": [{"split": "test", "path": "el/test-*"}]}, {"config_name": "en", "data_files": [{"split": "test", "path": "en/test-*"}]}, {"config_name": "es", "data_files": [{"split": "test", "path": "es/test-*"}]}, {"config_name": "et", "data_files": [{"split": "test", "path": "et/test-*"}]}, {"config_name": "eu", "data_files": [{"split": "test", "path": "eu/test-*"}]}, {"config_name": "fa", "data_files": [{"split": "test", "path": "fa/test-*"}]}, {"config_name": "fi", "data_files": [{"split": "test", "path": "fi/test-*"}]}, {"config_name": "fr", "data_files": [{"split": "test", "path": 
"fr/test-*"}]}, {"config_name": "ga", "data_files": [{"split": "test", "path": "ga/test-*"}]}, {"config_name": "gl", "data_files": [{"split": "test", "path": "gl/test-*"}]}, {"config_name": "he", "data_files": [{"split": "test", "path": "he/test-*"}]}, {"config_name": "hi", "data_files": [{"split": "test", "path": "hi/test-*"}]}, {"config_name": "hr", "data_files": [{"split": "test", "path": "hr/test-*"}]}, {"config_name": "hu", "data_files": [{"split": "test", "path": "hu/test-*"}]}, {"config_name": "hy", "data_files": [{"split": "test", "path": "hy/test-*"}]}, {"config_name": "id", "data_files": [{"split": "test", "path": "id/test-*"}]}, {"config_name": "it", "data_files": [{"split": "test", "path": "it/test-*"}]}, {"config_name": "ja", "data_files": [{"split": "test", "path": "ja/test-*"}]}, {"config_name": "ka", "data_files": [{"split": "test", "path": "ka/test-*"}]}, {"config_name": "ko", "data_files": [{"split": "test", "path": "ko/test-*"}]}, {"config_name": "la", "data_files": [{"split": "test", "path": "la/test-*"}]}, {"config_name": "lt", "data_files": [{"split": "test", "path": "lt/test-*"}]}, {"config_name": "lv", "data_files": [{"split": "test", "path": "lv/test-*"}]}, {"config_name": "ms", "data_files": [{"split": "test", "path": "ms/test-*"}]}, {"config_name": "nl", "data_files": [{"split": "test", "path": "nl/test-*"}]}, {"config_name": "pl", "data_files": [{"split": "test", "path": "pl/test-*"}]}, {"config_name": "pt", "data_files": [{"split": "test", "path": "pt/test-*"}]}, {"config_name": "ro", "data_files": [{"split": "test", "path": "ro/test-*"}]}, {"config_name": "ru", "data_files": [{"split": "test", "path": "ru/test-*"}]}, {"config_name": "sk", "data_files": [{"split": "test", "path": "sk/test-*"}]}, {"config_name": "sl", "data_files": [{"split": "test", "path": "sl/test-*"}]}, {"config_name": "sq", "data_files": [{"split": "test", "path": "sq/test-*"}]}, {"config_name": "sr", "data_files": [{"split": "test", "path": "sr/test-*"}]}, {"config_name": "sv", "data_files": [{"split": "test", "path": "sv/test-*"}]}, {"config_name": "ta", "data_files": [{"split": "test", "path": "ta/test-*"}]}, {"config_name": "th", "data_files": [{"split": "test", "path": "th/test-*"}]}, {"config_name": "tr", "data_files": [{"split": "test", "path": "tr/test-*"}]}, {"config_name": "uk", "data_files": [{"split": "test", "path": "uk/test-*"}]}, {"config_name": "ur", "data_files": [{"split": "test", "path": "ur/test-*"}]}, {"config_name": "vi", "data_files": [{"split": "test", "path": "vi/test-*"}]}, {"config_name": "zh", "data_files": [{"split": "test", "path": "zh/test-*"}]}]}
2024-01-10T16:11:29+00:00
[]
[]
TAGS #region-us
Extension/Modification of the original m_lama dataset
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
6fb1674fbbe32c42061ceb6099dae7c6cab8c265
# Dataset Card for "fashion_image_caption-100-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bs-code/fashion_image_caption-100-v2
[ "region:us" ]
2023-12-23T11:17:07+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22820471.0, "num_examples": 100}], "download_size": 22820373, "dataset_size": 22820471.0}}
2023-12-23T11:17:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fashion_image_caption-100-v2" More Information needed
[ "# Dataset Card for \"fashion_image_caption-100-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fashion_image_caption-100-v2\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fashion_image_caption-100-v2\"\n\nMore Information needed" ]
6db7acaa758b44941e0cbe1680408db94c9159f5
# Dataset Card for "fabric-designs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AfhamAhmed1/fabric-designs
[ "region:us" ]
2023-12-23T12:59:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "animal_designs", "1": "autumn_designs", "2": "damask_designs", "3": "floral_designs", "4": "geometric_designs"}}}}], "splits": [{"name": "train", "num_bytes": 25660006.988, "num_examples": 1274}], "download_size": 27096997, "dataset_size": 25660006.988}}
2023-12-23T13:00:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fabric-designs" More Information needed
[ "# Dataset Card for \"fabric-designs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fabric-designs\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fabric-designs\"\n\nMore Information needed" ]
7bdd6147ac725f02e74e99b6a3c8795d8693d5c7
This is a sample dataset for an exercise in retraining an existing pre-trained tokenizer. Background: scientists have discovered a new tribe. This tribe worships the celestial bodies (the moon is their god) and uses a language referred to as 'Lunarian'. The scientists have decided to build an LLM to help them communicate with the tribe; the LLM will carry out Lunarian-to-English translation. Our task is to train an existing BERT-based tokenizer on this corpus. The new tokenizer will then be used for training the LLM.
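As a rough sketch of that exercise, the snippet below retrains an existing BERT tokenizer's vocabulary on the Lunarian text using `train_new_from_iterator` from `transformers`. The text column name, base checkpoint, and vocabulary size are illustrative assumptions, not documented properties of this dataset.

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Column name "text" and the "train" split are assumptions about this repo's schema.
ds = load_dataset("acloudfan/lunarian-fictious-language", split="train")

def batch_iterator(batch_size=1000):
    # Yield batches of raw Lunarian strings for the tokenizer trainer.
    for i in range(0, len(ds), batch_size):
        yield ds[i : i + batch_size]["text"]

# Start from an existing (fast) BERT tokenizer and learn a new WordPiece vocabulary.
old_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
new_tokenizer = old_tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=8000)
new_tokenizer.save_pretrained("lunarian-bert-tokenizer")
```

The retrained tokenizer keeps BERT's special tokens and pre/post-processing while its vocabulary reflects Lunarian, which is what the translation LLM's training pipeline would then consume.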
acloudfan/lunarian-fictious-language
[ "license:apache-2.0", "region:us" ]
2023-12-23T13:22:00+00:00
{"license": "apache-2.0"}
2023-12-23T14:08:42+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
This is a sample dataset for an exercise in retraining an existing pre-trained tokenizer. Background: scientists have discovered a new tribe. This tribe worships the celestial bodies (the moon is their god) and uses a language referred to as 'Lunarian'. The scientists have decided to build an LLM to help them communicate with the tribe; the LLM will carry out Lunarian-to-English translation. Our task is to train an existing BERT-based tokenizer on this corpus. The new tokenizer will then be used for training the LLM.
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
e4b3d7a5676403cc425b5cc23828f1845ff1fc8f
# Dataset Card for Evaluation run of namirocks/tutor-model-13b-ep3 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [namirocks/tutor-model-13b-ep3](https://huggingface.co/namirocks/tutor-model-13b-ep3) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-23T14:22:10.523504](https://huggingface.co/datasets/open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3/blob/main/results_2023-12-23T14-22-10.523504.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.565791735957054, "acc_stderr": 0.03351962241327427, "acc_norm": 0.5744059102733704, "acc_norm_stderr": 0.03427708379273797, "mc1": 0.3537331701346389, "mc1_stderr": 0.016737814358846147, "mc2": 0.5298607855616481, "mc2_stderr": 0.015266264009722644 }, "harness|arc:challenge|25": { "acc": 0.5298634812286689, "acc_stderr": 0.014585305840007105, "acc_norm": 0.5733788395904437, "acc_norm_stderr": 0.014453185592920293 }, "harness|hellaswag|10": { "acc": 0.6179047998406691, "acc_stderr": 0.004849065962692133, "acc_norm": 0.8150766779525991, "acc_norm_stderr": 0.0038744190656586222 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.45185185185185184, "acc_stderr": 0.04299268905480864, "acc_norm": 0.45185185185185184, "acc_norm_stderr": 0.04299268905480864 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.5789473684210527, "acc_stderr": 0.040179012759817494, "acc_norm": 0.5789473684210527, "acc_norm_stderr": 0.040179012759817494 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.6, "acc_stderr": 0.049236596391733084, "acc_norm": 0.6, "acc_norm_stderr": 0.049236596391733084 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6075471698113207, "acc_stderr": 0.03005258057955785, "acc_norm": 0.6075471698113207, "acc_norm_stderr": 0.03005258057955785 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6527777777777778, "acc_stderr": 0.039812405437178615, "acc_norm": 0.6527777777777778, "acc_norm_stderr": 0.039812405437178615 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.48, "acc_stderr": 0.05021167315686781, "acc_norm": 0.48, "acc_norm_stderr": 0.05021167315686781 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.38, "acc_stderr": 0.048783173121456316, "acc_norm": 0.38, "acc_norm_stderr": 0.048783173121456316 },
"harness|hendrycksTest-college_mathematics|5": { "acc": 0.34, "acc_stderr": 0.04760952285695236, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695236 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5260115606936416, "acc_stderr": 0.038073017265045125, "acc_norm": 0.5260115606936416, "acc_norm_stderr": 0.038073017265045125 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.3627450980392157, "acc_stderr": 0.047840607041056527, "acc_norm": 0.3627450980392157, "acc_norm_stderr": 0.047840607041056527 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.68, "acc_stderr": 0.04688261722621505, "acc_norm": 0.68, "acc_norm_stderr": 0.04688261722621505 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.4297872340425532, "acc_stderr": 0.03236214467715564, "acc_norm": 0.4297872340425532, "acc_norm_stderr": 0.03236214467715564 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.2543859649122807, "acc_stderr": 0.040969851398436716, "acc_norm": 0.2543859649122807, "acc_norm_stderr": 0.040969851398436716 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5172413793103449, "acc_stderr": 0.04164188720169375, "acc_norm": 0.5172413793103449, "acc_norm_stderr": 0.04164188720169375 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.3253968253968254, "acc_stderr": 0.024130158299762613, "acc_norm": 0.3253968253968254, "acc_norm_stderr": 0.024130158299762613 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.373015873015873, "acc_stderr": 0.04325506042017086, "acc_norm": 0.373015873015873, "acc_norm_stderr": 0.04325506042017086 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.47, "acc_stderr": 0.05016135580465919, "acc_norm": 0.47, "acc_norm_stderr": 0.05016135580465919 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.6774193548387096, "acc_stderr": 0.026593084516572274, "acc_norm": 0.6774193548387096, "acc_norm_stderr": 0.026593084516572274 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4827586206896552, "acc_stderr": 0.035158955511656986, "acc_norm": 0.4827586206896552, "acc_norm_stderr": 0.035158955511656986 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.58, "acc_stderr": 0.04960449637488584, "acc_norm": 0.58, "acc_norm_stderr": 0.04960449637488584 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7090909090909091, "acc_stderr": 0.03546563019624336, "acc_norm": 0.7090909090909091, "acc_norm_stderr": 0.03546563019624336 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.696969696969697, "acc_stderr": 0.032742879140268674, "acc_norm": 0.696969696969697, "acc_norm_stderr": 0.032742879140268674 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8186528497409327, "acc_stderr": 0.027807032360686088, "acc_norm": 0.8186528497409327, "acc_norm_stderr": 0.027807032360686088 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.5743589743589743, "acc_stderr": 0.02506909438729653, "acc_norm": 0.5743589743589743, "acc_norm_stderr": 0.02506909438729653 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.31851851851851853, "acc_stderr": 0.028406533090608463, "acc_norm": 0.31851851851851853, "acc_norm_stderr": 0.028406533090608463 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.5966386554621849, "acc_stderr": 0.03186608121408831, "acc_norm": 0.5966386554621849, "acc_norm_stderr": 0.03186608121408831 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.2913907284768212, "acc_stderr": 
0.03710185726119995, "acc_norm": 0.2913907284768212, "acc_norm_stderr": 0.03710185726119995 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7522935779816514, "acc_stderr": 0.018508143602547832, "acc_norm": 0.7522935779816514, "acc_norm_stderr": 0.018508143602547832 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4583333333333333, "acc_stderr": 0.03398110890294635, "acc_norm": 0.4583333333333333, "acc_norm_stderr": 0.03398110890294635 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7352941176470589, "acc_stderr": 0.0309645179269234, "acc_norm": 0.7352941176470589, "acc_norm_stderr": 0.0309645179269234 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7510548523206751, "acc_stderr": 0.028146970599422644, "acc_norm": 0.7510548523206751, "acc_norm_stderr": 0.028146970599422644 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6547085201793722, "acc_stderr": 0.03191100192835794, "acc_norm": 0.6547085201793722, "acc_norm_stderr": 0.03191100192835794 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.6564885496183206, "acc_stderr": 0.041649760719448786, "acc_norm": 0.6564885496183206, "acc_norm_stderr": 0.041649760719448786 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7768595041322314, "acc_stderr": 0.03800754475228733, "acc_norm": 0.7768595041322314, "acc_norm_stderr": 0.03800754475228733 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7222222222222222, "acc_stderr": 0.04330043749650742, "acc_norm": 0.7222222222222222, "acc_norm_stderr": 0.04330043749650742 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.6932515337423313, "acc_stderr": 0.03623089915724147, "acc_norm": 0.6932515337423313, "acc_norm_stderr": 0.03623089915724147 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.375, "acc_stderr": 0.04595091388086298, "acc_norm": 0.375, "acc_norm_stderr": 0.04595091388086298 }, "harness|hendrycksTest-management|5": { "acc": 0.7378640776699029, "acc_stderr": 0.043546310772605956, "acc_norm": 0.7378640776699029, "acc_norm_stderr": 0.043546310772605956 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8547008547008547, "acc_stderr": 0.023086635086841407, "acc_norm": 0.8547008547008547, "acc_norm_stderr": 0.023086635086841407 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.55, "acc_stderr": 0.04999999999999999, "acc_norm": 0.55, "acc_norm_stderr": 0.04999999999999999 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7496807151979565, "acc_stderr": 0.01549108895149459, "acc_norm": 0.7496807151979565, "acc_norm_stderr": 0.01549108895149459 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.615606936416185, "acc_stderr": 0.026189666966272035, "acc_norm": 0.615606936416185, "acc_norm_stderr": 0.026189666966272035 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.37206703910614525, "acc_stderr": 0.0161658475835633, "acc_norm": 0.37206703910614525, "acc_norm_stderr": 0.0161658475835633 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6111111111111112, "acc_stderr": 0.027914055510468008, "acc_norm": 0.6111111111111112, "acc_norm_stderr": 0.027914055510468008 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6302250803858521, "acc_stderr": 0.02741799670563099, "acc_norm": 0.6302250803858521, "acc_norm_stderr": 0.02741799670563099 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6358024691358025, "acc_stderr": 0.026774929899722324, "acc_norm": 0.6358024691358025, "acc_norm_stderr": 0.026774929899722324 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 
0.4326241134751773, "acc_stderr": 0.029555454236778855, "acc_norm": 0.4326241134751773, "acc_norm_stderr": 0.029555454236778855 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.424380704041721, "acc_stderr": 0.012623343757430017, "acc_norm": 0.424380704041721, "acc_norm_stderr": 0.012623343757430017 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.5514705882352942, "acc_stderr": 0.0302114796091216, "acc_norm": 0.5514705882352942, "acc_norm_stderr": 0.0302114796091216 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.5571895424836601, "acc_stderr": 0.02009508315457734, "acc_norm": 0.5571895424836601, "acc_norm_stderr": 0.02009508315457734 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6727272727272727, "acc_stderr": 0.0449429086625209, "acc_norm": 0.6727272727272727, "acc_norm_stderr": 0.0449429086625209 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.6448979591836734, "acc_stderr": 0.030635655150387634, "acc_norm": 0.6448979591836734, "acc_norm_stderr": 0.030635655150387634 }, "harness|hendrycksTest-sociology|5": { "acc": 0.7562189054726368, "acc_stderr": 0.03036049015401466, "acc_norm": 0.7562189054726368, "acc_norm_stderr": 0.03036049015401466 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.82, "acc_stderr": 0.038612291966536934, "acc_norm": 0.82, "acc_norm_stderr": 0.038612291966536934 }, "harness|hendrycksTest-virology|5": { "acc": 0.4578313253012048, "acc_stderr": 0.038786267710023595, "acc_norm": 0.4578313253012048, "acc_norm_stderr": 0.038786267710023595 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.7660818713450293, "acc_stderr": 0.03246721765117826, "acc_norm": 0.7660818713450293, "acc_norm_stderr": 0.03246721765117826 }, "harness|truthfulqa:mc|0": { "mc1": 0.3537331701346389, "mc1_stderr": 0.016737814358846147, "mc2": 0.5298607855616481, "mc2_stderr": 0.015266264009722644 }, "harness|winogrande|5": { "acc": 0.7434885556432518, "acc_stderr": 0.012273648008759987 }, "harness|gsm8k|5": { "acc": 0.12054586808188021, "acc_stderr": 0.008968608285309085 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). 
--> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
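Beyond the per-task details shown above, the aggregated metrics live in the "results" configuration described earlier; a minimal sketch, assuming the "latest" split naming used throughout this card:

```python
from datasets import load_dataset

# "results" aggregates every run of this model; "latest" points at the most recent one.
results = load_dataset(
    "open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3",
    "results",
    split="latest",
)
print(results[0])  # one row per run, keyed by timestamp
```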
open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3
[ "region:us" ]
2023-12-23T14:24:31+00:00
{"pretty_name": "Evaluation run of namirocks/tutor-model-13b-ep3", "dataset_summary": "Dataset automatically created during the evaluation run of model [namirocks/tutor-model-13b-ep3](https://huggingface.co/namirocks/tutor-model-13b-ep3) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-23T14:22:10.523504](https://huggingface.co/datasets/open-llm-leaderboard/details_namirocks__tutor-model-13b-ep3/blob/main/results_2023-12-23T14-22-10.523504.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.565791735957054,\n \"acc_stderr\": 0.03351962241327427,\n \"acc_norm\": 0.5744059102733704,\n \"acc_norm_stderr\": 0.03427708379273797,\n \"mc1\": 0.3537331701346389,\n \"mc1_stderr\": 0.016737814358846147,\n \"mc2\": 0.5298607855616481,\n \"mc2_stderr\": 0.015266264009722644\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5298634812286689,\n \"acc_stderr\": 0.014585305840007105,\n \"acc_norm\": 0.5733788395904437,\n \"acc_norm_stderr\": 0.014453185592920293\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6179047998406691,\n \"acc_stderr\": 0.004849065962692133,\n \"acc_norm\": 0.8150766779525991,\n \"acc_norm_stderr\": 0.0038744190656586222\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.45185185185185184,\n \"acc_stderr\": 0.04299268905480864,\n \"acc_norm\": 0.45185185185185184,\n \"acc_norm_stderr\": 0.04299268905480864\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.5789473684210527,\n \"acc_stderr\": 0.040179012759817494,\n \"acc_norm\": 0.5789473684210527,\n \"acc_norm_stderr\": 0.040179012759817494\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.049236596391733084,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.049236596391733084\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6075471698113207,\n \"acc_stderr\": 0.03005258057955785,\n \"acc_norm\": 0.6075471698113207,\n \"acc_norm_stderr\": 0.03005258057955785\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6527777777777778,\n \"acc_stderr\": 0.039812405437178615,\n \"acc_norm\": 0.6527777777777778,\n \"acc_norm_stderr\": 0.039812405437178615\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.48,\n \"acc_stderr\": 0.05021167315686781,\n 
\"acc_norm\": 0.48,\n \"acc_norm_stderr\": 0.05021167315686781\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.38,\n \"acc_stderr\": 0.048783173121456316,\n \"acc_norm\": 0.38,\n \"acc_norm_stderr\": 0.048783173121456316\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695236,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695236\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5260115606936416,\n \"acc_stderr\": 0.038073017265045125,\n \"acc_norm\": 0.5260115606936416,\n \"acc_norm_stderr\": 0.038073017265045125\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.3627450980392157,\n \"acc_stderr\": 0.047840607041056527,\n \"acc_norm\": 0.3627450980392157,\n \"acc_norm_stderr\": 0.047840607041056527\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.68,\n \"acc_stderr\": 0.04688261722621505,\n \"acc_norm\": 0.68,\n \"acc_norm_stderr\": 0.04688261722621505\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.4297872340425532,\n \"acc_stderr\": 0.03236214467715564,\n \"acc_norm\": 0.4297872340425532,\n \"acc_norm_stderr\": 0.03236214467715564\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.2543859649122807,\n \"acc_stderr\": 0.040969851398436716,\n \"acc_norm\": 0.2543859649122807,\n \"acc_norm_stderr\": 0.040969851398436716\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5172413793103449,\n \"acc_stderr\": 0.04164188720169375,\n \"acc_norm\": 0.5172413793103449,\n \"acc_norm_stderr\": 0.04164188720169375\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3253968253968254,\n \"acc_stderr\": 0.024130158299762613,\n \"acc_norm\": 0.3253968253968254,\n \"acc_norm_stderr\": 0.024130158299762613\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.373015873015873,\n \"acc_stderr\": 0.04325506042017086,\n \"acc_norm\": 0.373015873015873,\n \"acc_norm_stderr\": 0.04325506042017086\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.47,\n \"acc_stderr\": 0.05016135580465919,\n \"acc_norm\": 0.47,\n \"acc_norm_stderr\": 0.05016135580465919\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6774193548387096,\n \"acc_stderr\": 0.026593084516572274,\n \"acc_norm\": 0.6774193548387096,\n \"acc_norm_stderr\": 0.026593084516572274\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4827586206896552,\n \"acc_stderr\": 0.035158955511656986,\n \"acc_norm\": 0.4827586206896552,\n \"acc_norm_stderr\": 0.035158955511656986\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.58,\n \"acc_stderr\": 0.04960449637488584,\n \"acc_norm\": 0.58,\n \"acc_norm_stderr\": 0.04960449637488584\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7090909090909091,\n \"acc_stderr\": 0.03546563019624336,\n \"acc_norm\": 0.7090909090909091,\n \"acc_norm_stderr\": 0.03546563019624336\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.696969696969697,\n \"acc_stderr\": 0.032742879140268674,\n \"acc_norm\": 0.696969696969697,\n \"acc_norm_stderr\": 0.032742879140268674\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8186528497409327,\n \"acc_stderr\": 0.027807032360686088,\n \"acc_norm\": 0.8186528497409327,\n \"acc_norm_stderr\": 0.027807032360686088\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 
0.5743589743589743,\n \"acc_stderr\": 0.02506909438729653,\n \"acc_norm\": 0.5743589743589743,\n \"acc_norm_stderr\": 0.02506909438729653\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.31851851851851853,\n \"acc_stderr\": 0.028406533090608463,\n \"acc_norm\": 0.31851851851851853,\n \"acc_norm_stderr\": 0.028406533090608463\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5966386554621849,\n \"acc_stderr\": 0.03186608121408831,\n \"acc_norm\": 0.5966386554621849,\n \"acc_norm_stderr\": 0.03186608121408831\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2913907284768212,\n \"acc_stderr\": 0.03710185726119995,\n \"acc_norm\": 0.2913907284768212,\n \"acc_norm_stderr\": 0.03710185726119995\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7522935779816514,\n \"acc_stderr\": 0.018508143602547832,\n \"acc_norm\": 0.7522935779816514,\n \"acc_norm_stderr\": 0.018508143602547832\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4583333333333333,\n \"acc_stderr\": 0.03398110890294635,\n \"acc_norm\": 0.4583333333333333,\n \"acc_norm_stderr\": 0.03398110890294635\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7352941176470589,\n \"acc_stderr\": 0.0309645179269234,\n \"acc_norm\": 0.7352941176470589,\n \"acc_norm_stderr\": 0.0309645179269234\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7510548523206751,\n \"acc_stderr\": 0.028146970599422644,\n \"acc_norm\": 0.7510548523206751,\n \"acc_norm_stderr\": 0.028146970599422644\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6547085201793722,\n \"acc_stderr\": 0.03191100192835794,\n \"acc_norm\": 0.6547085201793722,\n \"acc_norm_stderr\": 0.03191100192835794\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6564885496183206,\n \"acc_stderr\": 0.041649760719448786,\n \"acc_norm\": 0.6564885496183206,\n \"acc_norm_stderr\": 0.041649760719448786\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7768595041322314,\n \"acc_stderr\": 0.03800754475228733,\n \"acc_norm\": 0.7768595041322314,\n \"acc_norm_stderr\": 0.03800754475228733\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7222222222222222,\n \"acc_stderr\": 0.04330043749650742,\n \"acc_norm\": 0.7222222222222222,\n \"acc_norm_stderr\": 0.04330043749650742\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.6932515337423313,\n \"acc_stderr\": 0.03623089915724147,\n \"acc_norm\": 0.6932515337423313,\n \"acc_norm_stderr\": 0.03623089915724147\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.375,\n \"acc_stderr\": 0.04595091388086298,\n \"acc_norm\": 0.375,\n \"acc_norm_stderr\": 0.04595091388086298\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7378640776699029,\n \"acc_stderr\": 0.043546310772605956,\n \"acc_norm\": 0.7378640776699029,\n \"acc_norm_stderr\": 0.043546310772605956\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8547008547008547,\n \"acc_stderr\": 0.023086635086841407,\n \"acc_norm\": 0.8547008547008547,\n \"acc_norm_stderr\": 0.023086635086841407\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.55,\n \"acc_stderr\": 0.04999999999999999,\n \"acc_norm\": 0.55,\n \"acc_norm_stderr\": 0.04999999999999999\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7496807151979565,\n \"acc_stderr\": 0.01549108895149459,\n \"acc_norm\": 0.7496807151979565,\n 
\"acc_norm_stderr\": 0.01549108895149459\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.615606936416185,\n \"acc_stderr\": 0.026189666966272035,\n \"acc_norm\": 0.615606936416185,\n \"acc_norm_stderr\": 0.026189666966272035\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.37206703910614525,\n \"acc_stderr\": 0.0161658475835633,\n \"acc_norm\": 0.37206703910614525,\n \"acc_norm_stderr\": 0.0161658475835633\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6111111111111112,\n \"acc_stderr\": 0.027914055510468008,\n \"acc_norm\": 0.6111111111111112,\n \"acc_norm_stderr\": 0.027914055510468008\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6302250803858521,\n \"acc_stderr\": 0.02741799670563099,\n \"acc_norm\": 0.6302250803858521,\n \"acc_norm_stderr\": 0.02741799670563099\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6358024691358025,\n \"acc_stderr\": 0.026774929899722324,\n \"acc_norm\": 0.6358024691358025,\n \"acc_norm_stderr\": 0.026774929899722324\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.4326241134751773,\n \"acc_stderr\": 0.029555454236778855,\n \"acc_norm\": 0.4326241134751773,\n \"acc_norm_stderr\": 0.029555454236778855\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.424380704041721,\n \"acc_stderr\": 0.012623343757430017,\n \"acc_norm\": 0.424380704041721,\n \"acc_norm_stderr\": 0.012623343757430017\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.5514705882352942,\n \"acc_stderr\": 0.0302114796091216,\n \"acc_norm\": 0.5514705882352942,\n \"acc_norm_stderr\": 0.0302114796091216\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5571895424836601,\n \"acc_stderr\": 0.02009508315457734,\n \"acc_norm\": 0.5571895424836601,\n \"acc_norm_stderr\": 0.02009508315457734\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6727272727272727,\n \"acc_stderr\": 0.0449429086625209,\n \"acc_norm\": 0.6727272727272727,\n \"acc_norm_stderr\": 0.0449429086625209\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.6448979591836734,\n \"acc_stderr\": 0.030635655150387634,\n \"acc_norm\": 0.6448979591836734,\n \"acc_norm_stderr\": 0.030635655150387634\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7562189054726368,\n \"acc_stderr\": 0.03036049015401466,\n \"acc_norm\": 0.7562189054726368,\n \"acc_norm_stderr\": 0.03036049015401466\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.82,\n \"acc_stderr\": 0.038612291966536934,\n \"acc_norm\": 0.82,\n \"acc_norm_stderr\": 0.038612291966536934\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4578313253012048,\n \"acc_stderr\": 0.038786267710023595,\n \"acc_norm\": 0.4578313253012048,\n \"acc_norm_stderr\": 0.038786267710023595\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.7660818713450293,\n \"acc_stderr\": 0.03246721765117826,\n \"acc_norm\": 0.7660818713450293,\n \"acc_norm_stderr\": 0.03246721765117826\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.3537331701346389,\n \"mc1_stderr\": 0.016737814358846147,\n \"mc2\": 0.5298607855616481,\n \"mc2_stderr\": 0.015266264009722644\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7434885556432518,\n \"acc_stderr\": 0.012273648008759987\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.12054586808188021,\n \"acc_stderr\": 0.008968608285309085\n }\n}\n```", "repo_url": "https://huggingface.co/namirocks/tutor-model-13b-ep3", "leaderboard_url": 
"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|arc:challenge|25_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|gsm8k|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hellaswag|10_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T14-22-10.523504.parquet", 
"**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T14-22-10.523504.parquet", 
"**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-23T14-22-10.523504.parquet", 
"**/details_harness|hendrycksTest-prehistory|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-23T14-22-10.523504.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T14-22-10.523504.parquet"]}, 
{"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T14-22-10.523504.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-professional_law|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["**/details_harness|winogrande|5_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-23T14-22-10.523504.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_23T14_22_10.523504", "path": ["results_2023-12-23T14-22-10.523504.parquet"]}, {"split": "latest", "path": 
["results_2023-12-23T14-22-10.523504.parquet"]}]}]}
2023-12-23T14:24:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of namirocks/tutor-model-13b-ep3 Dataset automatically created during the evaluation run of model namirocks/tutor-model-13b-ep3 on the Open LLM Leaderboard. The dataset is composed of 63 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-23T14:22:10.523504 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of namirocks/tutor-model-13b-ep3\n\n\n\nDataset automatically created during the evaluation run of model namirocks/tutor-model-13b-ep3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-23T14:22:10.523504(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of namirocks/tutor-model-13b-ep3\n\n\n\nDataset automatically created during the evaluation run of model namirocks/tutor-model-13b-ep3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-23T14:22:10.523504(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 187, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of namirocks/tutor-model-13b-ep3\n\n\n\nDataset automatically created during the evaluation run of model namirocks/tutor-model-13b-ep3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-23T14:22:10.523504(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
89c869820e2cb38b6d0cd59a3ce7751cf12dcbbf
# Dataset Card for Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [mwitiderrick/open_llama_3b_glaive_v0.1](https://huggingface.co/mwitiderrick/open_llama_3b_glaive_v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-23T15:34:19.150703](https://huggingface.co/datasets/open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1/blob/main/results_2023-12-23T15-34-19.150703.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.2843747535406573, "acc_stderr": 0.031689110133124844, "acc_norm": 0.28633888958645765, "acc_norm_stderr": 0.03246963675970039, "mc1": 0.23623011015911874, "mc1_stderr": 0.014869755015871114, "mc2": 0.3585983664640556, "mc2_stderr": 0.013742745779138914 }, "harness|arc:challenge|25": { "acc": 0.3703071672354949, "acc_stderr": 0.01411129875167495, "acc_norm": 0.4069965870307167, "acc_norm_stderr": 0.014356399418009131 }, "harness|hellaswag|10": { "acc": 0.4971121290579566, "acc_stderr": 0.004989698183207831, "acc_norm": 0.6744672376020713, "acc_norm_stderr": 0.004676159299105414 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.2814814814814815, "acc_stderr": 0.03885004245800254, "acc_norm": 0.2814814814814815, "acc_norm_stderr": 0.03885004245800254 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.21052631578947367, "acc_stderr": 0.03317672787533157, "acc_norm": 0.21052631578947367, "acc_norm_stderr": 0.03317672787533157 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.27, "acc_stderr": 0.044619604333847394, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.29056603773584905, "acc_stderr": 0.027943219989337156, "acc_norm": 0.29056603773584905, "acc_norm_stderr": 0.027943219989337156 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.2638888888888889, "acc_stderr": 0.03685651095897532, "acc_norm": 0.2638888888888889, "acc_norm_stderr": 0.03685651095897532 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.23, "acc_stderr": 0.04229525846816505, "acc_norm": 0.23, "acc_norm_stderr": 0.04229525846816505 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34,
"acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.26, "acc_stderr": 0.044084400227680794, "acc_norm": 0.26, "acc_norm_stderr": 0.044084400227680794 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.3236994219653179, "acc_stderr": 0.035676037996391685, "acc_norm": 0.3236994219653179, "acc_norm_stderr": 0.035676037996391685 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.20588235294117646, "acc_stderr": 0.04023382273617749, "acc_norm": 0.20588235294117646, "acc_norm_stderr": 0.04023382273617749 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.3148936170212766, "acc_stderr": 0.030363582197238167, "acc_norm": 0.3148936170212766, "acc_norm_stderr": 0.030363582197238167 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.3333333333333333, "acc_stderr": 0.044346007015849245, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.044346007015849245 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.27586206896551724, "acc_stderr": 0.037245636197746325, "acc_norm": 0.27586206896551724, "acc_norm_stderr": 0.037245636197746325 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.25396825396825395, "acc_stderr": 0.022418042891113932, "acc_norm": 0.25396825396825395, "acc_norm_stderr": 0.022418042891113932 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.20634920634920634, "acc_stderr": 0.03619604524124252, "acc_norm": 0.20634920634920634, "acc_norm_stderr": 0.03619604524124252 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.2645161290322581, "acc_stderr": 0.02509189237885928, "acc_norm": 0.2645161290322581, "acc_norm_stderr": 0.02509189237885928 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.30049261083743845, "acc_stderr": 0.03225799476233484, "acc_norm": 0.30049261083743845, "acc_norm_stderr": 0.03225799476233484 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.15, "acc_stderr": 0.035887028128263714, "acc_norm": 0.15, "acc_norm_stderr": 0.035887028128263714 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.26666666666666666, "acc_stderr": 0.034531318018854146, "acc_norm": 0.26666666666666666, "acc_norm_stderr": 0.034531318018854146 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.31313131313131315, "acc_stderr": 0.03304205087813653, "acc_norm": 0.31313131313131315, "acc_norm_stderr": 0.03304205087813653 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.27461139896373055, "acc_stderr": 0.03221024508041154, "acc_norm": 0.27461139896373055, "acc_norm_stderr": 0.03221024508041154 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.32564102564102565, "acc_stderr": 0.02375966576741229, "acc_norm": 0.32564102564102565, "acc_norm_stderr": 0.02375966576741229 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.2518518518518518, "acc_stderr": 0.02646611753895991, "acc_norm": 0.2518518518518518, "acc_norm_stderr": 0.02646611753895991 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.2773109243697479, "acc_stderr": 0.029079374539480007, "acc_norm": 0.2773109243697479, "acc_norm_stderr": 0.029079374539480007 }, 
"harness|hendrycksTest-high_school_physics|5": { "acc": 0.26490066225165565, "acc_stderr": 0.03603038545360383, "acc_norm": 0.26490066225165565, "acc_norm_stderr": 0.03603038545360383 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.27339449541284405, "acc_stderr": 0.019109299846098275, "acc_norm": 0.27339449541284405, "acc_norm_stderr": 0.019109299846098275 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4722222222222222, "acc_stderr": 0.0340470532865388, "acc_norm": 0.4722222222222222, "acc_norm_stderr": 0.0340470532865388 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.24019607843137256, "acc_stderr": 0.02998373305591361, "acc_norm": 0.24019607843137256, "acc_norm_stderr": 0.02998373305591361 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.25316455696202533, "acc_stderr": 0.028304657943035303, "acc_norm": 0.25316455696202533, "acc_norm_stderr": 0.028304657943035303 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.3452914798206278, "acc_stderr": 0.03191100192835794, "acc_norm": 0.3452914798206278, "acc_norm_stderr": 0.03191100192835794 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.22137404580152673, "acc_stderr": 0.03641297081313728, "acc_norm": 0.22137404580152673, "acc_norm_stderr": 0.03641297081313728 }, "harness|hendrycksTest-international_law|5": { "acc": 0.2975206611570248, "acc_stderr": 0.041733491480834974, "acc_norm": 0.2975206611570248, "acc_norm_stderr": 0.041733491480834974 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.28703703703703703, "acc_stderr": 0.043733130409147614, "acc_norm": 0.28703703703703703, "acc_norm_stderr": 0.043733130409147614 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.26993865030674846, "acc_stderr": 0.034878251684978906, "acc_norm": 0.26993865030674846, "acc_norm_stderr": 0.034878251684978906 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.26785714285714285, "acc_stderr": 0.04203277291467764, "acc_norm": 0.26785714285714285, "acc_norm_stderr": 0.04203277291467764 }, "harness|hendrycksTest-management|5": { "acc": 0.2524271844660194, "acc_stderr": 0.04301250399690877, "acc_norm": 0.2524271844660194, "acc_norm_stderr": 0.04301250399690877 }, "harness|hendrycksTest-marketing|5": { "acc": 0.2692307692307692, "acc_stderr": 0.02905858830374884, "acc_norm": 0.2692307692307692, "acc_norm_stderr": 0.02905858830374884 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.28991060025542786, "acc_stderr": 0.016225017944770957, "acc_norm": 0.28991060025542786, "acc_norm_stderr": 0.016225017944770957 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.25722543352601157, "acc_stderr": 0.02353292543104428, "acc_norm": 0.25722543352601157, "acc_norm_stderr": 0.02353292543104428 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.24692737430167597, "acc_stderr": 0.014422292204808835, "acc_norm": 0.24692737430167597, "acc_norm_stderr": 0.014422292204808835 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.27124183006535946, "acc_stderr": 0.025457756696667878, "acc_norm": 0.27124183006535946, "acc_norm_stderr": 0.025457756696667878 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.3183279742765273, "acc_stderr": 0.026457225067811032, "acc_norm": 0.3183279742765273, "acc_norm_stderr": 0.026457225067811032 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.2839506172839506, "acc_stderr": 
0.025089478523765134, "acc_norm": 0.2839506172839506, "acc_norm_stderr": 0.025089478523765134 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.25177304964539005, "acc_stderr": 0.025892151156709405, "acc_norm": 0.25177304964539005, "acc_norm_stderr": 0.025892151156709405 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.242503259452412, "acc_stderr": 0.01094657096634878, "acc_norm": 0.242503259452412, "acc_norm_stderr": 0.01094657096634878 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.4522058823529412, "acc_stderr": 0.030233758551596455, "acc_norm": 0.4522058823529412, "acc_norm_stderr": 0.030233758551596455 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.24509803921568626, "acc_stderr": 0.017401816711427657, "acc_norm": 0.24509803921568626, "acc_norm_stderr": 0.017401816711427657 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.3, "acc_stderr": 0.04389311454644286, "acc_norm": 0.3, "acc_norm_stderr": 0.04389311454644286 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.21224489795918366, "acc_stderr": 0.026176967197866767, "acc_norm": 0.21224489795918366, "acc_norm_stderr": 0.026176967197866767 }, "harness|hendrycksTest-sociology|5": { "acc": 0.2537313432835821, "acc_stderr": 0.03076944496729601, "acc_norm": 0.2537313432835821, "acc_norm_stderr": 0.03076944496729601 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-virology|5": { "acc": 0.2891566265060241, "acc_stderr": 0.03529486801511115, "acc_norm": 0.2891566265060241, "acc_norm_stderr": 0.03529486801511115 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.26900584795321636, "acc_stderr": 0.0340105262010409, "acc_norm": 0.26900584795321636, "acc_norm_stderr": 0.0340105262010409 }, "harness|truthfulqa:mc|0": { "mc1": 0.23623011015911874, "mc1_stderr": 0.014869755015871114, "mc2": 0.3585983664640556, "mc2_stderr": 0.013742745779138914 }, "harness|winogrande|5": { "acc": 0.6471981057616417, "acc_stderr": 0.013429728101788961 }, "harness|gsm8k|5": { "acc": 0.019711902956785442, "acc_stderr": 0.003828982978735702 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. 
--> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
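The aggregated metrics shown under "Latest results" can also be retrieved programmatically. Below is a minimal sketch, assuming the same `datasets` API as the loading snippet above and relying only on the "results" configuration and its "latest" split alias that this card describes; the column layout of the aggregated table is not documented here, so the sketch inspects it rather than assuming field names.

```python
from datasets import load_dataset

# The "results" configuration stores the aggregated metrics of the run;
# the "latest" split is an alias for the most recent timestamped run
# (here 2023-12-23T15-34-19.150703).
results = load_dataset(
    "open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1",
    "results",
    split="latest",
)

# Inspect the available columns and the first row instead of assuming
# specific field names.
print(results.column_names)
print(results[0])
```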
open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1
[ "region:us" ]
2023-12-23T15:36:04+00:00
{"pretty_name": "Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1", "dataset_summary": "Dataset automatically created during the evaluation run of model [mwitiderrick/open_llama_3b_glaive_v0.1](https://huggingface.co/mwitiderrick/open_llama_3b_glaive_v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-23T15:34:19.150703](https://huggingface.co/datasets/open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1/blob/main/results_2023-12-23T15-34-19.150703.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2843747535406573,\n \"acc_stderr\": 0.031689110133124844,\n \"acc_norm\": 0.28633888958645765,\n \"acc_norm_stderr\": 0.03246963675970039,\n \"mc1\": 0.23623011015911874,\n \"mc1_stderr\": 0.014869755015871114,\n \"mc2\": 0.3585983664640556,\n \"mc2_stderr\": 0.013742745779138914\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.3703071672354949,\n \"acc_stderr\": 0.01411129875167495,\n \"acc_norm\": 0.4069965870307167,\n \"acc_norm_stderr\": 0.014356399418009131\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.4971121290579566,\n \"acc_stderr\": 0.004989698183207831,\n \"acc_norm\": 0.6744672376020713,\n \"acc_norm_stderr\": 0.004676159299105414\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.2814814814814815,\n \"acc_stderr\": 0.03885004245800254,\n \"acc_norm\": 0.2814814814814815,\n \"acc_norm_stderr\": 0.03885004245800254\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.21052631578947367,\n \"acc_stderr\": 0.03317672787533157,\n \"acc_norm\": 0.21052631578947367,\n \"acc_norm_stderr\": 0.03317672787533157\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.27,\n \"acc_stderr\": 0.044619604333847394,\n \"acc_norm\": 0.27,\n \"acc_norm_stderr\": 0.044619604333847394\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.29056603773584905,\n \"acc_stderr\": 0.027943219989337156,\n \"acc_norm\": 0.29056603773584905,\n \"acc_norm_stderr\": 0.027943219989337156\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2638888888888889,\n \"acc_stderr\": 0.03685651095897532,\n \"acc_norm\": 0.2638888888888889,\n \"acc_norm_stderr\": 0.03685651095897532\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 
0.23,\n \"acc_stderr\": 0.04229525846816505,\n \"acc_norm\": 0.23,\n \"acc_norm_stderr\": 0.04229525846816505\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.044084400227680794,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.044084400227680794\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.3236994219653179,\n \"acc_stderr\": 0.035676037996391685,\n \"acc_norm\": 0.3236994219653179,\n \"acc_norm_stderr\": 0.035676037996391685\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.20588235294117646,\n \"acc_stderr\": 0.04023382273617749,\n \"acc_norm\": 0.20588235294117646,\n \"acc_norm_stderr\": 0.04023382273617749\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.3148936170212766,\n \"acc_stderr\": 0.030363582197238167,\n \"acc_norm\": 0.3148936170212766,\n \"acc_norm_stderr\": 0.030363582197238167\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.3333333333333333,\n \"acc_stderr\": 0.044346007015849245,\n \"acc_norm\": 0.3333333333333333,\n \"acc_norm_stderr\": 0.044346007015849245\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.27586206896551724,\n \"acc_stderr\": 0.037245636197746325,\n \"acc_norm\": 0.27586206896551724,\n \"acc_norm_stderr\": 0.037245636197746325\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.25396825396825395,\n \"acc_stderr\": 0.022418042891113932,\n \"acc_norm\": 0.25396825396825395,\n \"acc_norm_stderr\": 0.022418042891113932\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.20634920634920634,\n \"acc_stderr\": 0.03619604524124252,\n \"acc_norm\": 0.20634920634920634,\n \"acc_norm_stderr\": 0.03619604524124252\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.2645161290322581,\n \"acc_stderr\": 0.02509189237885928,\n \"acc_norm\": 0.2645161290322581,\n \"acc_norm_stderr\": 0.02509189237885928\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.30049261083743845,\n \"acc_stderr\": 0.03225799476233484,\n \"acc_norm\": 0.30049261083743845,\n \"acc_norm_stderr\": 0.03225799476233484\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.15,\n \"acc_stderr\": 0.035887028128263714,\n \"acc_norm\": 0.15,\n \"acc_norm_stderr\": 0.035887028128263714\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.26666666666666666,\n \"acc_stderr\": 0.034531318018854146,\n \"acc_norm\": 0.26666666666666666,\n \"acc_norm_stderr\": 0.034531318018854146\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.31313131313131315,\n \"acc_stderr\": 0.03304205087813653,\n \"acc_norm\": 0.31313131313131315,\n \"acc_norm_stderr\": 0.03304205087813653\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.27461139896373055,\n \"acc_stderr\": 0.03221024508041154,\n \"acc_norm\": 0.27461139896373055,\n \"acc_norm_stderr\": 0.03221024508041154\n },\n 
\"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.32564102564102565,\n \"acc_stderr\": 0.02375966576741229,\n \"acc_norm\": 0.32564102564102565,\n \"acc_norm_stderr\": 0.02375966576741229\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2518518518518518,\n \"acc_stderr\": 0.02646611753895991,\n \"acc_norm\": 0.2518518518518518,\n \"acc_norm_stderr\": 0.02646611753895991\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.2773109243697479,\n \"acc_stderr\": 0.029079374539480007,\n \"acc_norm\": 0.2773109243697479,\n \"acc_norm_stderr\": 0.029079374539480007\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.26490066225165565,\n \"acc_stderr\": 0.03603038545360383,\n \"acc_norm\": 0.26490066225165565,\n \"acc_norm_stderr\": 0.03603038545360383\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.27339449541284405,\n \"acc_stderr\": 0.019109299846098275,\n \"acc_norm\": 0.27339449541284405,\n \"acc_norm_stderr\": 0.019109299846098275\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4722222222222222,\n \"acc_stderr\": 0.0340470532865388,\n \"acc_norm\": 0.4722222222222222,\n \"acc_norm_stderr\": 0.0340470532865388\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.24019607843137256,\n \"acc_stderr\": 0.02998373305591361,\n \"acc_norm\": 0.24019607843137256,\n \"acc_norm_stderr\": 0.02998373305591361\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.25316455696202533,\n \"acc_stderr\": 0.028304657943035303,\n \"acc_norm\": 0.25316455696202533,\n \"acc_norm_stderr\": 0.028304657943035303\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.3452914798206278,\n \"acc_stderr\": 0.03191100192835794,\n \"acc_norm\": 0.3452914798206278,\n \"acc_norm_stderr\": 0.03191100192835794\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.22137404580152673,\n \"acc_stderr\": 0.03641297081313728,\n \"acc_norm\": 0.22137404580152673,\n \"acc_norm_stderr\": 0.03641297081313728\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.2975206611570248,\n \"acc_stderr\": 0.041733491480834974,\n \"acc_norm\": 0.2975206611570248,\n \"acc_norm_stderr\": 0.041733491480834974\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.28703703703703703,\n \"acc_stderr\": 0.043733130409147614,\n \"acc_norm\": 0.28703703703703703,\n \"acc_norm_stderr\": 0.043733130409147614\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.26993865030674846,\n \"acc_stderr\": 0.034878251684978906,\n \"acc_norm\": 0.26993865030674846,\n \"acc_norm_stderr\": 0.034878251684978906\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.26785714285714285,\n \"acc_stderr\": 0.04203277291467764,\n \"acc_norm\": 0.26785714285714285,\n \"acc_norm_stderr\": 0.04203277291467764\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.2524271844660194,\n \"acc_stderr\": 0.04301250399690877,\n \"acc_norm\": 0.2524271844660194,\n \"acc_norm_stderr\": 0.04301250399690877\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.2692307692307692,\n \"acc_stderr\": 0.02905858830374884,\n \"acc_norm\": 0.2692307692307692,\n \"acc_norm_stderr\": 0.02905858830374884\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n 
\"acc\": 0.28991060025542786,\n \"acc_stderr\": 0.016225017944770957,\n \"acc_norm\": 0.28991060025542786,\n \"acc_norm_stderr\": 0.016225017944770957\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.25722543352601157,\n \"acc_stderr\": 0.02353292543104428,\n \"acc_norm\": 0.25722543352601157,\n \"acc_norm_stderr\": 0.02353292543104428\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.24692737430167597,\n \"acc_stderr\": 0.014422292204808835,\n \"acc_norm\": 0.24692737430167597,\n \"acc_norm_stderr\": 0.014422292204808835\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.27124183006535946,\n \"acc_stderr\": 0.025457756696667878,\n \"acc_norm\": 0.27124183006535946,\n \"acc_norm_stderr\": 0.025457756696667878\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.3183279742765273,\n \"acc_stderr\": 0.026457225067811032,\n \"acc_norm\": 0.3183279742765273,\n \"acc_norm_stderr\": 0.026457225067811032\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.2839506172839506,\n \"acc_stderr\": 0.025089478523765134,\n \"acc_norm\": 0.2839506172839506,\n \"acc_norm_stderr\": 0.025089478523765134\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.25177304964539005,\n \"acc_stderr\": 0.025892151156709405,\n \"acc_norm\": 0.25177304964539005,\n \"acc_norm_stderr\": 0.025892151156709405\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.242503259452412,\n \"acc_stderr\": 0.01094657096634878,\n \"acc_norm\": 0.242503259452412,\n \"acc_norm_stderr\": 0.01094657096634878\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4522058823529412,\n \"acc_stderr\": 0.030233758551596455,\n \"acc_norm\": 0.4522058823529412,\n \"acc_norm_stderr\": 0.030233758551596455\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.24509803921568626,\n \"acc_stderr\": 0.017401816711427657,\n \"acc_norm\": 0.24509803921568626,\n \"acc_norm_stderr\": 0.017401816711427657\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.04389311454644286,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.04389311454644286\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.21224489795918366,\n \"acc_stderr\": 0.026176967197866767,\n \"acc_norm\": 0.21224489795918366,\n \"acc_norm_stderr\": 0.026176967197866767\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.2537313432835821,\n \"acc_stderr\": 0.03076944496729601,\n \"acc_norm\": 0.2537313432835821,\n \"acc_norm_stderr\": 0.03076944496729601\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.2891566265060241,\n \"acc_stderr\": 0.03529486801511115,\n \"acc_norm\": 0.2891566265060241,\n \"acc_norm_stderr\": 0.03529486801511115\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.26900584795321636,\n \"acc_stderr\": 0.0340105262010409,\n \"acc_norm\": 0.26900584795321636,\n \"acc_norm_stderr\": 0.0340105262010409\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.23623011015911874,\n \"mc1_stderr\": 0.014869755015871114,\n \"mc2\": 0.3585983664640556,\n \"mc2_stderr\": 0.013742745779138914\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6471981057616417,\n \"acc_stderr\": 0.013429728101788961\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.019711902956785442,\n \"acc_stderr\": 0.003828982978735702\n 
}\n}\n```", "repo_url": "https://huggingface.co/mwitiderrick/open_llama_3b_glaive_v0.1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|arc:challenge|25_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|gsm8k|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hellaswag|10_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T15-34-19.150703.parquet", 
"**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T15-34-19.150703.parquet", 
"**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-23T15-34-19.150703.parquet", 
"**/details_harness|hendrycksTest-philosophy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-23T15-34-19.150703.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": 
["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-global_facts|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": 
["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T15-34-19.150703.parquet"]}, 
{"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": 
["**/details_harness|hendrycksTest-professional_law|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_23T15_34_19.150703", "path": ["**/details_harness|winogrande|5_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-23T15-34-19.150703.parquet"]}]}, {"config_name": "results", "data_files": [{"split": 
"2023_12_23T15_34_19.150703", "path": ["results_2023-12-23T15-34-19.150703.parquet"]}, {"split": "latest", "path": ["results_2023-12-23T15-34-19.150703.parquet"]}]}]}
2023-12-23T15:36:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1 Dataset automatically created during the evaluation run of model mwitiderrick/open_llama_3b_glaive_v0.1 on the Open LLM Leaderboard. The dataset is composed of 63 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-23T15:34:19.150703 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each one in the results and in the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
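The card text above promises a loading example ("you can for instance do the following:"). A minimal sketch is given below: the repository ID is an assumption based on the Open LLM Leaderboard's usual "details_<org>__<model>" naming and is not stated in this record; the config name `harness_winogrande_5` and the `latest` split are taken from the config metadata above.

```python
from datasets import load_dataset

# Repo ID assumed from the leaderboard's usual details-repo naming scheme;
# the config name and the "latest" split come from this record's config list.
data = load_dataset(
    "open-llm-leaderboard/details_mwitiderrick__open_llama_3b_glaive_v0.1",
    "harness_winogrande_5",
    split="latest",  # "latest" always points to the most recent run
)
print(data)
```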
[ "# Dataset Card for Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1\n\n\n\nDataset automatically created during the evaluation run of model mwitiderrick/open_llama_3b_glaive_v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-23T15:34:19.150703(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1\n\n\n\nDataset automatically created during the evaluation run of model mwitiderrick/open_llama_3b_glaive_v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-23T15:34:19.150703(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 197, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of mwitiderrick/open_llama_3b_glaive_v0.1\n\n\n\nDataset automatically created during the evaluation run of model mwitiderrick/open_llama_3b_glaive_v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-23T15:34:19.150703(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]" ]
93370009a6e86f9c65452b80aa4c263d0115526e
# Dataset Card for "global230k_with_text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mespinosami/global230k_with_text
[ "region:us" ]
2023-12-23T15:49:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8772332868.54, "num_examples": 162940}, {"name": "validation", "num_bytes": 1357769997.04, "num_examples": 23416}, {"name": "test", "num_bytes": 2618381497.671, "num_examples": 46463}], "download_size": 12445672864, "dataset_size": 12748484363.251001}}
2023-12-24T12:59:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "global230k_with_text" More Information needed
[ "# Dataset Card for \"global230k_with_text\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"global230k_with_text\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"global230k_with_text\"\n\nMore Information needed" ]