Dataset viewer column schema (type, min length, max length):

| column          | type   | min | max   |
|-----------------|--------|-----|-------|
| sha             | string | 40  | 40    |
| text            | string | 1   | 13.4M |
| id              | string | 2   | 117   |
| tags            | list   | 1   | 7.91k |
| created_at      | string | 25  | 25    |
| metadata        | string | 2   | 875k  |
| last_modified   | string | 25  | 25    |
| arxiv           | list   | 0   | 25    |
| languages       | list   | 0   | 7.91k |
| tags_str        | string | 17  | 159k  |
| text_str        | string | 1   | 447k  |
| text_lists      | list   | 0   | 352   |
| processed_texts | list   | 1   | 353   |
| tokens_length   | list   | 1   | 353   |
| input_texts     | list   | 1   | 40    |
caf9cbc620fd20bf98135d4ab07ad53c75f2486b
# Dataset Card for "elec5307-project-2-dataset-full-public" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yukuai0011/elec5307-project-2-dataset-full-public
[ "region:us" ]
2023-10-09T03:09:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Apple", "1": "Avocado", "2": "Banana", "3": "Blueberry", "4": "Coconut", "5": "Cucumber", "6": "Dragon_fruit", "7": "Grape", "8": "Grapefruit", "9": "Kiwifruit", "10": "Lemon", "11": "Lychee", "12": "Mangoes", "13": "Orange", "14": "Papaya", "15": "Passion fruit", "16": "Peach", "17": "Pear", "18": "Pineapple", "19": "Pomegranate", "20": "Raspberry", "21": "Rockmelon", "22": "Strawberries", "23": "Tomato", "24": "Waterlemon"}}}}], "splits": [{"name": "train", "num_bytes": 344011868.018, "num_examples": 3026}], "download_size": 319895933, "dataset_size": 344011868.018}}
2023-10-09T15:20:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "elec5307-project-2-dataset-full-public" More Information needed
[ "# Dataset Card for \"elec5307-project-2-dataset-full-public\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"elec5307-project-2-dataset-full-public\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"elec5307-project-2-dataset-full-public\"\n\nMore Information needed" ]
8cb9acc0d4941ede6d866bf5bcdaf196044c3d1c
# Dataset Card for "elec5307-project-2-dataset-splited-public" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yukuai0011/elec5307-project-2-dataset-splited-public
[ "region:us" ]
2023-10-09T03:11:15+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Apple", "1": "Avocado", "2": "Banana", "3": "Blueberry", "4": "Coconut", "5": "Cucumber", "6": "Dragon_fruit", "7": "Grape", "8": "Grapefruit", "9": "Kiwifruit", "10": "Lemon", "11": "Lychee", "12": "Mangoes", "13": "Orange", "14": "Papaya", "15": "Passion fruit", "16": "Peach", "17": "Pear", "18": "Pineapple", "19": "Pomegranate", "20": "Raspberry", "21": "Rockmelon", "22": "Strawberries", "23": "Tomato", "24": "Waterlemon"}}}}], "splits": [{"name": "train", "num_bytes": 270703771.307, "num_examples": 2421}, {"name": "test", "num_bytes": 63336528.0, "num_examples": 605}], "download_size": 320028339, "dataset_size": 334040299.307}}
2023-10-09T15:47:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "elec5307-project-2-dataset-splited-public" More Information needed
[ "# Dataset Card for \"elec5307-project-2-dataset-splited-public\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"elec5307-project-2-dataset-splited-public\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"elec5307-project-2-dataset-splited-public\"\n\nMore Information needed" ]
6c2264e0b36522ed78c10e20b01afd4838bbe82a
# Dataset Card for "COVID-QA-Chunk-64-testset-biencoder-data-90_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-Chunk-64-testset-biencoder-data-90_10
[ "region:us" ]
2023-10-09T03:29:09+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13595044, "num_examples": 203}], "download_size": 459357, "dataset_size": 13595044}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T03:29:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-Chunk-64-testset-biencoder-data-90_10" More Information needed
[ "# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-90_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-90_10\"\n\nMore Information needed" ]
[ 6, 31 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-90_10\"\n\nMore Information needed" ]
f269c8b97516f27dd338aeb68bd1ca3a7f208602
# Dataset Card for "COVID-QA-Chunk-64-question-answering-biencoder-data-90_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-Chunk-64-question-answering-biencoder-data-90_10
[ "region:us" ]
2023-10-09T03:29:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 78943266, "num_examples": 1631}, {"name": "validation", "num_bytes": 8529659, "num_examples": 185}], "download_size": 14143196, "dataset_size": 87472925}}
2023-10-09T03:29:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-Chunk-64-question-answering-biencoder-data-90_10" More Information needed
[ "# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-90_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-90_10\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-90_10\"\n\nMore Information needed" ]
fab44df047262ef368ee1f2347b65e112ce59a6e
# Dataset Card for "potholes-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Andyrasika/potholes-dataset
[ "region:us" ]
2023-10-09T03:43:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 26575443.0, "num_examples": 350}, {"name": "validation", "num_bytes": 2929769.0, "num_examples": 34}, {"name": "test", "num_bytes": 1442112.0, "num_examples": 16}], "download_size": 30638600, "dataset_size": 30947324.0}}
2023-10-09T03:43:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "potholes-dataset" More Information needed
[ "# Dataset Card for \"potholes-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"potholes-dataset\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"potholes-dataset\"\n\nMore Information needed" ]
df96e9f1c702561154ba0ae80e3c95de3808814f
Dataset for the paper **Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision**. *Images*: `images.tar`; `dev` *labels*: `llvisionqa_dev.json`; `test` *labels*: `llvisionqa_test.json`. See GitHub for usage: https://github.com/vqassessment/q-bench. Feel free to cite us. ```bibtex @article{wu2023qbench, title={Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision}, author={Wu, Haoning and Zhang, Zicheng and Zhang, Erli and Chen, Chaofeng and Liao, Liang and Wang, Annan and Li, Chunyi and Sun, Wenxiu and Yan, Qiong and Zhai, Guangtao and Lin, Weisi}, year={2023}, eprint={2309.14181}, } ```
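A minimal sketch (not from the card) of poking at the label files once `images.tar` is extracted; the JSON layout is an assumption here, so print an entry before relying on any keys:

```python
import json

# Load the dev-split labels shipped alongside images.tar.
with open("llvisionqa_dev.json") as f:
    dev = json.load(f)

# Inspect the structure before assuming field names.
print(type(dev).__name__, len(dev))
first = dev[0] if isinstance(dev, list) else next(iter(dev.values()))
print(first)
```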
teowu/LLVisionQA-QBench
[ "license:cc-by-nc-sa-4.0", "arxiv:2309.14181", "region:us" ]
2023-10-09T03:53:34+00:00
{"license": "cc-by-nc-sa-4.0"}
2023-10-13T18:24:55+00:00
[ "2309.14181" ]
[]
TAGS #license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us
Dataset for Paper: Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision. *Images*: 'URL' 'dev'-*labels*: 'llvisionqa_dev.json' 'test'-*labels*: 'llvisionqa_test.json' See Github for Usage: URL Feel free to cite us.
[]
[ "TAGS\n#license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us \n" ]
4a75c0c7df7dc66b072b6593b554d6d40d37db2b
Dataset for the paper **Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision**. See GitHub: https://github.com/vqassessment/q-bench. Feel free to cite us. ```bibtex @article{wu2023qbench, title={Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision}, author={Wu, Haoning and Zhang, Zicheng and Zhang, Erli and Chen, Chaofeng and Liao, Liang and Wang, Annan and Li, Chunyi and Sun, Wenxiu and Yan, Qiong and Zhai, Guangtao and Lin, Weisi}, year={2023}, eprint={2309.14181}, } ```
teowu/LLDescribe-QBench
[ "license:cc-by-nc-sa-4.0", "arxiv:2309.14181", "region:us" ]
2023-10-09T03:54:58+00:00
{"license": "cc-by-nc-sa-4.0"}
2023-10-09T07:26:58+00:00
[ "2309.14181" ]
[]
TAGS #license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us
Dataset for Paper: Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision. See Github: URL Feel free to cite us.
[]
[ "TAGS\n#license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#license-cc-by-nc-sa-4.0 #arxiv-2309.14181 #region-us \n" ]
4960d1d6723ea7c5489eae4c2a1524f7b9eaae16
# Dataset Card for Dataset Name ## Dataset Description - **Paper:** [Can Large Language Models Empower Molecular Property Prediction?](https://arxiv.org/abs/2307.07443) ### Dataset Summary Topic annotation in LLM4Mol is an in-context molecular classification task, with text explanations serving as molecular representations. ### Data Fields
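A minimal loading sketch, using the field names (`question`, `index`, `answer`, `label`) declared in the repo metadata below:

```python
from datasets import load_dataset

# Single "train" split with 2015 examples per the repo metadata.
ds = load_dataset("minyiche/llm4mol", split="train")

row = ds[0]
print(row["question"][:200])
print("label:", row["label"], "| answer:", row["answer"])
```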
minyiche/llm4mol
[ "arxiv:2307.07443", "region:us" ]
2023-10-09T04:17:47+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "index", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2584423, "num_examples": 2015}], "download_size": 750078, "dataset_size": 2584423}}
2023-10-09T17:01:54+00:00
[ "2307.07443" ]
[]
TAGS #arxiv-2307.07443 #region-us
# Dataset Card for Dataset Name ## Dataset Description - Paper: Can Large Language Models Empower Molecular Property Prediction? ### Dataset Summary Topic annotation in LLM4Mol is an in-context molecular classification task, with text explanations serving as molecular representations. ### Data Fields
[ "# Dataset Card for Dataset Name", "## Dataset Description\n- Paper: Can Large Language Models Empower Molecular Property Prediction?", "### Dataset Summary\n\nTopic annotation in LLM4Mol is a in-context molecular classification task along with text explanations as molecular representations", "### Data Fields" ]
[ "TAGS\n#arxiv-2307.07443 #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n- Paper: Can Large Language Models Empower Molecular Property Prediction?", "### Dataset Summary\n\nTopic annotation in LLM4Mol is a in-context molecular classification task along with text explanations as molecular representations", "### Data Fields" ]
[ 14, 8, 21, 34, 5 ]
[ "passage: TAGS\n#arxiv-2307.07443 #region-us \n# Dataset Card for Dataset Name## Dataset Description\n- Paper: Can Large Language Models Empower Molecular Property Prediction?### Dataset Summary\n\nTopic annotation in LLM4Mol is a in-context molecular classification task along with text explanations as molecular representations### Data Fields" ]
e01a330008bf79a8ae63eebf0c75d1aa7c84b08b
# Dataset Card for "a19a65d2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/a19a65d2
[ "region:us" ]
2023-10-09T04:20:57+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 174, "num_examples": 10}], "download_size": 1323, "dataset_size": 174}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T04:20:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "a19a65d2" More Information needed
[ "# Dataset Card for \"a19a65d2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"a19a65d2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"a19a65d2\"\n\nMore Information needed" ]
9f2b6fee458e783ac4a73d567f9b3ca17f5806f1
# Dataset Card for Evaluation run of sequelbox/SharpBalance ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/sequelbox/SharpBalance - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [sequelbox/SharpBalance](https://huggingface.co/sequelbox/SharpBalance) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration, "results", stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_sequelbox__SharpBalance", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-23T18:53:09.205615](https://huggingface.co/datasets/open-llm-leaderboard/details_sequelbox__SharpBalance/blob/main/results_2023-10-23T18-53-09.205615.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks; you can find each one in the "results" config and in the "latest" split of each eval): ```python { "all": { "em": 0.30861996644295303, "em_stderr": 0.00473053301508219, "f1": 0.3692638422818801, "f1_stderr": 0.004628079358040571, "acc": 0.5935214367393442, "acc_stderr": 0.011697898266884079 }, "harness|drop|3": { "em": 0.30861996644295303, "em_stderr": 0.00473053301508219, "f1": 0.3692638422818801, "f1_stderr": 0.004628079358040571 }, "harness|gsm8k|5": { "acc": 0.3464746019711903, "acc_stderr": 0.013107179054313396 }, "harness|winogrande|5": { "acc": 0.840568271507498, "acc_stderr": 0.010288617479454764 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
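As a follow-up to the snippet in the card, a sketch of loading the aggregated metrics instead of a single task, assuming the "results" config and "latest" split named in the repo configuration below:

```python
from datasets import load_dataset

# The "results" config aggregates all runs; "latest" points at the newest one.
results = load_dataset(
    "open-llm-leaderboard/details_sequelbox__SharpBalance",
    "results",
    split="latest",
)
print(results[0])
```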
open-llm-leaderboard/details_sequelbox__SharpBalance
[ "region:us" ]
2023-10-09T04:50:11+00:00
{"pretty_name": "Evaluation run of sequelbox/SharpBalance", "dataset_summary": "Dataset automatically created during the evaluation run of model [sequelbox/SharpBalance](https://huggingface.co/sequelbox/SharpBalance) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_sequelbox__SharpBalance\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-23T18:53:09.205615](https://huggingface.co/datasets/open-llm-leaderboard/details_sequelbox__SharpBalance/blob/main/results_2023-10-23T18-53-09.205615.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.30861996644295303,\n \"em_stderr\": 0.00473053301508219,\n \"f1\": 0.3692638422818801,\n \"f1_stderr\": 0.004628079358040571,\n \"acc\": 0.5935214367393442,\n \"acc_stderr\": 0.011697898266884079\n },\n \"harness|drop|3\": {\n \"em\": 0.30861996644295303,\n \"em_stderr\": 0.00473053301508219,\n \"f1\": 0.3692638422818801,\n \"f1_stderr\": 0.004628079358040571\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.3464746019711903,\n \"acc_stderr\": 0.013107179054313396\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.840568271507498,\n \"acc_stderr\": 0.010288617479454764\n }\n}\n```", "repo_url": "https://huggingface.co/sequelbox/SharpBalance", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|arc:challenge|25_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_23T18_53_09.205615", "path": ["**/details_harness|drop|3_2023-10-23T18-53-09.205615.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-23T18-53-09.205615.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_23T18_53_09.205615", "path": ["**/details_harness|gsm8k|5_2023-10-23T18-53-09.205615.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-23T18-53-09.205615.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hellaswag|10_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": 
"2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T05-49-47.525988.parquet", 
"**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T05-49-47.525988.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T05-49-47.525988.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", 
"data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", 
"path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T05-49-47.525988.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T05-49-47.525988.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_23T18_53_09.205615", "path": ["**/details_harness|winogrande|5_2023-10-23T18-53-09.205615.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-23T18-53-09.205615.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T05_49_47.525988", "path": ["results_2023-10-09T05-49-47.525988.parquet"]}, {"split": "2023_10_23T18_53_09.205615", "path": ["results_2023-10-23T18-53-09.205615.parquet"]}, {"split": "latest", "path": ["results_2023-10-23T18-53-09.205615.parquet"]}]}]}
2023-10-23T17:53:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of sequelbox/SharpBalance ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model sequelbox/SharpBalance on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration, "results", stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-23T18:53:09.205615 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks; you can find each one in the "results" config and in the "latest" split of each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of sequelbox/SharpBalance", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/SharpBalance on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-23T18:53:09.205615(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of sequelbox/SharpBalance", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/SharpBalance on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-23T18:53:09.205615(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 17, 31, 165, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of sequelbox/SharpBalance## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/SharpBalance on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-23T18:53:09.205615(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
b0d002c4b4f82369145ca48b8fefe61196e74534
train
krthk/kapardhi_style
[ "region:us" ]
2023-10-09T05:01:31+00:00
{}
2023-10-09T05:12:09+00:00
[]
[]
TAGS #region-us
train
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
0ff728d19f730ecce3392f64dccc2885598b308e
# Dataset Card for The Stack YAML K8s

This dataset is a subset of The Stack dataset data/yaml. The YAML files were parsed and filtered down to the valid K8s YAML files, which are what this dataset contains.

The dataset contains 276520 valid K8s YAML files. The dataset was created by running the [the-stack-yaml-k8s.ipynb](https://github.com/substratusai/the-stack-yaml-k8s/blob/main/the-stack-k8s-yaml.ipynb) Notebook on K8s using [substratus.ai](https://substratus.ai).

Source code used to generate the dataset: https://github.com/substratusai/the-stack-yaml-k8s

Need some help? Questions? Join our Discord server: <a href="https://discord.gg/JeXhcmjZVm"><img alt="discord-invite" src="https://dcbadge.vercel.app/api/server/JeXhcmjZVm?style=flat"></a>

### How to use it

```python
from datasets import load_dataset

ds = load_dataset("substratusai/the-stack-yaml-k8s", split="train")
ds[0]["content"]
```

## Original The Stack Dataset Description

- **Homepage:** https://www.bigcode-project.org/
- **Repository:** https://github.com/bigcode-project
- **Paper:** https://arxiv.org/abs/2211.15533
- **Leaderboard:** N/A
- **Point of Contact:** [email protected]

## Dataset Structure

### Data Instances

Each data instance corresponds to one file. The content of the file is in the `content` feature, and other features (`repository_name`, `licenses`, etc.) provide some metadata. Note that a given file can appear in several different repositories that satisfy our safe-license criterion. If that is the case, only the first (in alphabetical order) of these repositories is shown for simplicity.

### Data Fields

- `content` (string): the content of the file.
- `size` (integer): size of the uncompressed file.
- `lang` (string): the programming language.
- `ext` (string): file extension
- `avg_line_length` (float): the average line-length of the file.
- `max_line_length` (integer): the maximum line-length of the file.
- `alphanum_fraction` (float): the fraction of characters in the file that are alphabetical or numerical characters.
- `hexsha` (string): unique git hash of file
- `max_{stars|forks|issues}_repo_path` (string): path to file in repo containing this file with maximum number of `{stars|forks|issues}`
- `max_{stars|forks|issues}_repo_name` (string): name of repo containing this file with maximum number of `{stars|forks|issues}`
- `max_{stars|forks|issues}_repo_head_hexsha` (string): hexsha of repository head
- `max_{stars|forks|issues}_repo_licenses` (string): licenses in repository
- `max_{stars|forks|issues}_count` (integer): number of `{stars|forks|issues}` in repository
- `max_{stars|forks|issues}_repo_{stars|forks|issues}_min_datetime` (string): first timestamp of a `{stars|forks|issues}` event
- `max_{stars|forks|issues}_repo_{stars|forks|issues}_max_datetime` (string): last timestamp of a `{stars|forks|issues}` event

### Data Splits

The dataset has no splits and all data is loaded as the train split by default. If you want to set up a custom train-test split, beware that the dataset contains a lot of near-duplicates, which can cause leakage into the test split.
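As a quick sanity check on the `content` field, the sketch below streams a small sample and parses each file with PyYAML. This snippet is illustrative and not part of the generation notebook: it assumes PyYAML is installed, and it uses `yaml.safe_load_all` because a single file may hold several `---`-separated documents.

```python
import itertools
from collections import Counter

import yaml  # PyYAML, assumed installed: pip install pyyaml
from datasets import load_dataset

# Stream to avoid downloading the full ~2GB dataset up front.
ds = load_dataset("substratusai/the-stack-yaml-k8s", split="train", streaming=True)

kinds = Counter()
for row in itertools.islice(ds, 1000):  # sample the first 1000 files
    try:
        # A file can contain multiple '---'-separated YAML documents.
        for doc in yaml.safe_load_all(row["content"]):
            if isinstance(doc, dict) and "kind" in doc:
                kinds[doc["kind"]] += 1
    except yaml.YAMLError:
        # Files were pre-validated as K8s YAML, but guard edge cases anyway.
        continue

print(kinds.most_common(10))
```

Any other column from the Data Fields list above can be read the same way.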
## Dataset Creation

### Curation Rationale

One of the challenges faced by researchers working on code LLMs is the lack of openness and transparency around the development of these systems. Most prior works described the high-level data collection process but did not release the training data. It is therefore difficult for other researchers to fully reproduce these models and understand what kind of pre-training data leads to high-performing code LLMs. By releasing an open large-scale code dataset we hope to make training of code LLMs more reproducible.

### Source Data

#### Initial Data Collection and Normalization

220.92M active GitHub repository names were collected from the event archives published between January 1st, 2015 and March 31st, 2022 on [GHArchive](https://gharchive.org/). Only 137.36M of these repositories were public and accessible on GitHub – others were not accessible as they had been deleted by their owners. 51.76B files were downloaded from the public repositories on GitHub between November 2021 and June 2022. 5.28B files were unique. The uncompressed size of all stored files is 92.36TB.

The list of programming language extensions is taken from this [list](https://gist.github.com/ppisarczyk/43962d06686722d26d176fad46879d41) (also provided in Appendix C of the paper).

Near-deduplication was implemented in the pre-processing pipeline on top of exact deduplication. To find near-duplicates, MinHash signatures with 256 permutations were computed for all documents in linear time. Locality Sensitive Hashing was used to find the clusters of duplicates. Jaccard similarities were then computed inside these clusters, with a similarity threshold of 0.85, to remove any false positives. Roughly 40% of permissively licensed files were (near-)duplicates. See section 3 of the paper for further details; a toy sketch of this approach appears after the list below.

The following are not stored:

- Files that cannot contribute to training code: binary, empty, could not be decoded
- Files larger than 1MB
- The excluded file extensions are listed in Appendix B of the paper.
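The Stack's deduplication code itself is not reproduced here, but the MinHash + LSH scheme just described can be sketched with the `datasketch` library. Everything below is illustrative: the toy documents and the character-shingling choice are assumptions, while the 256 permutations and the 0.85 threshold mirror the numbers above.

```python
from datasketch import MinHash, MinHashLSH

def signature(text: str, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    # Character 5-gram shingles; the actual pipeline's tokenization may differ.
    for i in range(max(len(text) - 4, 1)):
        m.update(text[i:i + 5].encode("utf8"))
    return m

docs = {
    "a.yaml": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: web",
    "b.yaml": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: web2",
    "c.yaml": "apiVersion: apps/v1\nkind: Deployment",
}

sigs = {name: signature(text) for name, text in docs.items()}
lsh = MinHashLSH(threshold=0.85, num_perm=256)  # same threshold as above
for name, sig in sigs.items():
    lsh.insert(name, sig)

# LSH yields candidate clusters; estimated Jaccard filters false positives.
for name, sig in sigs.items():
    for other in lsh.query(sig):
        if other != name and sig.jaccard(sigs[other]) >= 0.85:
            print(f"{name} ~ {other} look like near-duplicates")
```

The real pipeline applies the same candidate-then-verify logic at the scale of billions of files; the sketch only illustrates the mechanics.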
##### License detection

Permissive licenses have minimal restrictions on how the software can be copied, modified, and redistributed. The full list of licenses can be found [here](https://huggingface.co/datasets/bigcode/the-stack-dedup/blob/main/licenses.json). GHArchive contained the license information for approximately 12% of the collected repositories. For the remaining repositories, [go-license-detector](https://github.com/src-d/go-license-detector) was run to detect the most likely SPDX license identifier. The detector did not detect a license for ~81% of the repositories, in which case the repository was excluded from the dataset.

A file was included in the safe license dataset if at least one of the repositories containing the file had a permissive license.

#### Who are the source language producers?

The source (code) language producers are users of GitHub that created unique repository names between January 1st, 2015, and March 31st, 2022.

### Personal and Sensitive Information

The released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub. Deduplication has helped to reduce the amount of sensitive data that may exist. In the event that the dataset contains personal information, researchers should only use public, non-personal information in support of conducting and publishing their [open-access](https://en.wikipedia.org/wiki/Open_access) research. Personal information should not be used for spamming purposes, including sending unsolicited emails or selling of personal information. Complaints, removal requests, and "do not contact" requests can be sent to [email protected].

The PII pipeline for this dataset is still a work in progress (see this [issue](https://github.com/bigcode-project/admin/issues/9) for updates). Researchers that wish to contribute to the anonymization pipeline of the project can apply to join [here](https://www.bigcode-project.org/docs/about/join/). Developers with source code in the dataset can request to have it removed [here](https://www.bigcode-project.org/docs/about/ip/) (proof of code contribution is required).

### Opting out of The Stack

We are giving developers the ability to have their code removed from the dataset upon request. The process for submitting and enacting removal requests will keep evolving throughout the project as we receive feedback and build up more data governance tools.

You can check if your code is in The Stack with the following ["Am I In The Stack?" Space](https://huggingface.co/spaces/bigcode/in-the-stack). If you'd like to have your data removed from the dataset follow the [instructions on GitHub](https://github.com/bigcode-project/opt-out-v2).

## Considerations for Using the Data

### Social Impact of Dataset

The Stack is an output of the BigCode Project. BigCode aims to be responsible by design and by default. The project is conducted in the spirit of Open Science, focused on the responsible development of LLMs for code.

With the release of The Stack, we aim to increase access, reproducibility, and transparency of code LLMs in the research community. Work to de-risk and improve on the implementation of ethical best practices of code LLMs is conducted in various BigCode working groups. The Legal, Ethics, and Governance working group has explored topics such as licensing (including copyleft and the intended use of permissively licensed code), attribution of generated code to original code, rights to restrict processing, the inclusion of Personally Identifiable Information (PII), and risks of malicious code, among other topics. This work is ongoing as of October 25th, 2022.

We expect code LLMs to enable people from diverse backgrounds to write higher quality code and develop low-code applications. Mission-critical software could become easier to maintain as professional developers are guided by code-generating systems on how to write more robust and efficient code. While the social impact is intended to be positive, the increased accessibility of code LLMs comes with certain risks such as over-reliance on the generated code and long-term effects on the software development job market.

A broader impact analysis relating to Code LLMs can be found in section 7 of this [paper](https://arxiv.org/abs/2107.03374). An in-depth risk assessment for Code LLMs can be found in section 4 of this [paper](https://arxiv.org/abs/2207.14157).

### Discussion of Biases

The code collected from GitHub does not contain demographic information or proxy information about the demographics. However, it is not without risks, as the comments within the code may contain harmful or offensive language, which could be learned by the models.

Widely adopted programming languages like C and JavaScript are overrepresented compared to niche programming languages like Julia and Scala. Some programming languages such as SQL, Batchfile, and TypeScript are less likely to be permissively licensed (4% vs the average 10%). This may result in a biased representation of those languages. Permissively licensed files also tend to be longer.

Roughly 40 natural languages are present in docstrings and comments, with English being the most prevalent. In Python files, it makes up ~96% of the dataset.
For further information on data analysis of the Stack, see this [repo](https://github.com/bigcode-project/bigcode-analysis).

### Other Known Limitations

One of the current limitations of The Stack is that scraped HTML for websites may not be compliant with Web Content Accessibility Guidelines ([WCAG](https://www.w3.org/WAI/standards-guidelines/wcag/)). This could have an impact on HTML-generated code that may introduce web accessibility issues.

The training dataset could contain malicious code and/or the model could be used to generate malware or ransomware.

To the best of our knowledge, all files contained in the dataset are licensed with one of the permissive licenses (see list in [Licensing information](#licensing-information)). The accuracy of license attribution is limited by the accuracy of GHArchive and go-license-detector. Any mistakes should be reported to the BigCode Project for review and follow-up as needed.

## Additional Information

### Dataset Curators

1. Harm de Vries, ServiceNow Research, [email protected]
2. Leandro von Werra, Hugging Face, [email protected]

### Licensing Information

The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.

The list of [SPDX license identifiers](https://spdx.org/licenses/) included in the dataset can be found [here](https://huggingface.co/datasets/bigcode/the-stack/blob/main/licenses.json).

### Citation Information

```
@article{Kocetkov2022TheStack,
  title={The Stack: 3 TB of permissively licensed source code},
  author={Kocetkov, Denis and Li, Raymond and Ben Allal, Loubna and Li, Jia and Mou, Chenghao and Muñoz Ferrandis, Carlos and Jernite, Yacine and Mitchell, Margaret and Hughes, Sean and Wolf, Thomas and Bahdanau, Dzmitry and von Werra, Leandro and de Vries, Harm},
  journal={Preprint},
  year={2022}
}
```

## Terms of Use for The Stack

The Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset:

1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.
2. The Stack is regularly updated to enact validated data removal requests. By clicking on "Access repository", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in [the following thread](https://huggingface.co/datasets/bigcode/the-stack/discussions/7). If you have questions about dataset versions and allowed uses, please also ask them in the dataset's [community discussions](https://huggingface.co/datasets/bigcode/the-stack/discussions/new). We will also notify users via email when the latest usable version changes.
3. To host, share, or otherwise provide access to The Stack dataset, you must include these Terms of Use and require users to agree to them.
substratusai/the-stack-yaml-k8s
[ "task_categories:text-generation", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:unknown", "language:code", "license:other", "arxiv:2211.15533", "arxiv:2107.03374", "arxiv:2207.14157", "region:us" ]
2023-10-09T05:05:26+00:00
{"annotations_creators": [], "language_creators": ["crowdsourced", "expert-generated"], "language": ["code"], "license": ["other"], "multilinguality": ["multilingual"], "size_categories": ["unknown"], "source_datasets": [], "task_categories": ["text-generation"], "task_ids": [], "pretty_name": "The-Stack", "extra_gated_prompt": "## Terms of Use for The Stack\nThe Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset:\n1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n2. The Stack is regularly updated to enact validated data removal requests. By clicking on \"Access repository\", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in [the following thread](https://huggingface.co/datasets/bigcode/the-stack/discussions/7). If you have questions about dataset versions and allowed uses, please also ask them in the dataset\u2019s [community discussions](https://huggingface.co/datasets/bigcode/the-stack/discussions/new). We will also notify users via email when the latest usable version changes.\n3. To host, share, or otherwise provide access to The Stack dataset, you must include [these Terms of Use](https://huggingface.co/datasets/bigcode/the-stack#terms-of-use-for-the-stack) and require users to agree to it.\n\nBy clicking on \"Access repository\" below, you accept that your contact information (email address and username) can be shared with the dataset maintainers as well.", "extra_gated_fields": {"Email": "text", "I have read the License and agree with its terms": "checkbox"}, "dataset_info": {"features": [{"name": "hexsha", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "ext", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "max_stars_repo_path", "dtype": "string"}, {"name": "max_stars_repo_name", "dtype": "string"}, {"name": "max_stars_repo_head_hexsha", "dtype": "string"}, {"name": "max_stars_repo_licenses", "sequence": "string"}, {"name": "max_stars_count", "dtype": "int64"}, {"name": "max_stars_repo_stars_event_min_datetime", "dtype": "string"}, {"name": "max_stars_repo_stars_event_max_datetime", "dtype": "string"}, {"name": "max_issues_repo_path", "dtype": "string"}, {"name": "max_issues_repo_name", "dtype": "string"}, {"name": "max_issues_repo_head_hexsha", "dtype": "string"}, {"name": "max_issues_repo_licenses", "sequence": "string"}, {"name": "max_issues_count", "dtype": "int64"}, {"name": "max_issues_repo_issues_event_min_datetime", "dtype": "string"}, {"name": "max_issues_repo_issues_event_max_datetime", "dtype": "string"}, {"name": "max_forks_repo_path", "dtype": "string"}, {"name": "max_forks_repo_name", "dtype": "string"}, {"name": "max_forks_repo_head_hexsha", "dtype": "string"}, {"name": "max_forks_repo_licenses", "sequence": "string"}, {"name": "max_forks_count", "dtype": "int64"}, {"name": "max_forks_repo_forks_event_min_datetime", "dtype": "string"}, {"name": "max_forks_repo_forks_event_max_datetime", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "avg_line_length", "dtype": "float64"}, {"name": "max_line_length", "dtype": "int64"}, {"name": "alphanum_fraction", "dtype": "float64"}], 
"splits": [{"name": "train", "num_bytes": 2056665435.7311056, "num_examples": 276520}], "download_size": 312473618, "dataset_size": 2056665435.7311056}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-10T04:30:49+00:00
[ "2211.15533", "2107.03374", "2207.14157" ]
[ "code" ]
TAGS #task_categories-text-generation #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-unknown #language-code #license-other #arxiv-2211.15533 #arxiv-2107.03374 #arxiv-2207.14157 #region-us
# Dataset Card for The Stack YAML K8s This dataset is a subset of The Stack dataset data/yaml. The YAML files were parsed and filtered down to the valid K8s YAML files, which are what this dataset contains. The dataset contains 276520 valid K8s YAML files. The dataset was created by running the URL Notebook on K8s using URL. Source code used to generate the dataset: URL Need some help? Questions? Join our Discord server: <a href="URL alt="discord-invite" src="URL ### How to use it ## Original The Stack Dataset Description - Homepage: URL - Repository: URL - Paper: URL - Leaderboard: N/A - Point of Contact: contact@URL ## Dataset Structure ### Data Instances Each data instance corresponds to one file. The content of the file is in the 'content' feature, and other features ('repository_name', 'licenses', etc.) provide some metadata. Note that a given file can appear in several different repositories that satisfy our safe-license criterion. If that is the case, only the first (in alphabetical order) of these repositories is shown for simplicity. ### Data Fields - 'content' (string): the content of the file. - 'size' (integer): size of the uncompressed file. - 'lang' (string): the programming language. - 'ext' (string): file extension - 'avg_line_length' (float): the average line-length of the file. - 'max_line_length' (integer): the maximum line-length of the file. - 'alphanum_fraction' (float): the fraction of characters in the file that are alphabetical or numerical characters. - 'hexsha' (string): unique git hash of file - 'max_{stars|forks|issues}_repo_path' (string): path to file in repo containing this file with maximum number of '{stars|forks|issues}' - 'max_{stars|forks|issues}_repo_name' (string): name of repo containing this file with maximum number of '{stars|forks|issues}' - 'max_{stars|forks|issues}_repo_head_hexsha' (string): hexsha of repository head - 'max_{stars|forks|issues}_repo_licenses' (string): licenses in repository - 'max_{stars|forks|issues}_count' (integer): number of '{stars|forks|issues}' in repository - 'max_{stars|forks|issues}_repo_{stars|forks|issues}_min_datetime' (string): first timestamp of a '{stars|forks|issues}' event - 'max_{stars|forks|issues}_repo_{stars|forks|issues}_max_datetime' (string): last timestamp of a '{stars|forks|issues}' event ### Data Splits The dataset has no splits and all data is loaded as the train split by default. If you want to set up a custom train-test split, beware that the dataset contains a lot of near-duplicates, which can cause leakage into the test split. ## Dataset Creation ### Curation Rationale One of the challenges faced by researchers working on code LLMs is the lack of openness and transparency around the development of these systems. Most prior works described the high-level data collection process but did not release the training data. It is therefore difficult for other researchers to fully reproduce these models and understand what kind of pre-training data leads to high-performing code LLMs. By releasing an open large-scale code dataset we hope to make training of code LLMs more reproducible. ### Source Data #### Initial Data Collection and Normalization 220.92M active GitHub repository names were collected from the event archives published between January 1st, 2015 and March 31st, 2022 on GHArchive. Only 137.36M of these repositories were public and accessible on GitHub – others were not accessible as they had been deleted by their owners. 
51.76B files were downloaded from the public repositories on GitHub between November 2021 and June 2022. 5.28B files were unique. The uncompressed size of all stored files is 92.36TB. The list of programming language extensions is taken from this list (also provided in Appendix C of the paper). Near-deduplication was implemented in the pre-processing pipeline on top of exact deduplication. To find near-duplicates, MinHash signatures with 256 permutations were computed for all documents in linear time. Locality Sensitive Hashing was used to find the clusters of duplicates. Jaccard similarities were then computed inside these clusters, with a similarity threshold of 0.85, to remove any false positives. Roughly 40% of permissively licensed files were (near-)duplicates. See section 3 of the paper for further details. The following are not stored: - Files that cannot contribute to training code: binary, empty, could not be decoded - Files larger than 1MB - The excluded file extensions are listed in Appendix B of the paper. ##### License detection Permissive licenses have minimal restrictions on how the software can be copied, modified, and redistributed. The full list of licenses can be found here. GHArchive contained the license information for approximately 12% of the collected repositories. For the remaining repositories, go-license-detector was run to detect the most likely SPDX license identifier. The detector did not detect a license for ~81% of the repositories, in which case the repository was excluded from the dataset. A file was included in the safe license dataset if at least one of the repositories containing the file had a permissive license. #### Who are the source language producers? The source (code) language producers are users of GitHub that created unique repository names between January 1st, 2015, and March 31st, 2022. ### Personal and Sensitive Information The released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub. Deduplication has helped to reduce the amount of sensitive data that may exist. In the event that the dataset contains personal information, researchers should only use public, non-personal information in support of conducting and publishing their open-access research. Personal information should not be used for spamming purposes, including sending unsolicited emails or selling of personal information. Complaints, removal requests, and "do not contact" requests can be sent to contact@URL. The PII pipeline for this dataset is still a work in progress (see this issue for updates). Researchers that wish to contribute to the anonymization pipeline of the project can apply to join here. Developers with source code in the dataset can request to have it removed here (proof of code contribution is required). ### Opting out of The Stack We are giving developers the ability to have their code removed from the dataset upon request. The process for submitting and enacting removal requests will keep evolving throughout the project as we receive feedback and build up more data governance tools. You can check if your code is in The Stack with the following "Am I In The Stack?" Space. If you'd like to have your data removed from the dataset follow the instructions on GitHub. ## Considerations for Using the Data ### Social Impact of Dataset The Stack is an output of the BigCode Project. BigCode aims to be responsible by design and by default. 
The project is conducted in the spirit of Open Science, focused on the responsible development of LLMs for code. With the release of The Stack, we aim to increase access, reproducibility, and transparency of code LLMs in the research community. Work to de-risk and improve on the implementation of ethical best practices of code LLMs is conducted in various BigCode working groups. The Legal, Ethics, and Governance working group has explored topics such as licensing (including copyleft and the intended use of permissively licensed code), attribution of generated code to original code, rights to restrict processing, the inclusion of Personally Identifiable Information (PII), and risks of malicious code, among other topics. This work is ongoing as of October 25th, 2022. We expect code LLMs to enable people from diverse backgrounds to write higher quality code and develop low-code applications. Mission-critical software could become easier to maintain as professional developers are guided by code-generating systems on how to write more robust and efficient code. While the social impact is intended to be positive, the increased accessibility of code LLMs comes with certain risks such as over-reliance on the generated code and long-term effects on the software development job market. A broader impact analysis relating to Code LLMs can be found in section 7 of this paper. An in-depth risk assessment for Code LLMs can be found in section 4 of this paper. ### Discussion of Biases The code collected from GitHub does not contain demographic information or proxy information about the demographics. However, it is not without risks, as the comments within the code may contain harmful or offensive language, which could be learned by the models. Widely adopted programming languages like C and JavaScript are overrepresented compared to niche programming languages like Julia and Scala. Some programming languages such as SQL, Batchfile, and TypeScript are less likely to be permissively licensed (4% vs the average 10%). This may result in a biased representation of those languages. Permissively licensed files also tend to be longer. Roughly 40 natural languages are present in docstrings and comments, with English being the most prevalent. In Python files, it makes up ~96% of the dataset. For further information on data analysis of the Stack, see this repo. ### Other Known Limitations One of the current limitations of The Stack is that scraped HTML for websites may not be compliant with Web Content Accessibility Guidelines (WCAG). This could have an impact on HTML-generated code that may introduce web accessibility issues. The training dataset could contain malicious code and/or the model could be used to generate malware or ransomware. To the best of our knowledge, all files contained in the dataset are licensed with one of the permissive licenses (see list in Licensing information). The accuracy of license attribution is limited by the accuracy of GHArchive and go-license-detector. Any mistakes should be reported to the BigCode Project for review and follow-up as needed. ## Additional Information ### Dataset Curators 1. Harm de Vries, ServiceNow Research, harm.devries@URL 2. Leandro von Werra, Hugging Face, leandro@URL ### Licensing Information The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. 
We facilitate this by providing provenance information for each data point. The list of SPDX license identifiers included in the dataset can be found here. ## Terms of Use for The Stack The Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset: 1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point. 2. The Stack is regularly updated to enact validated data removal requests. By clicking on "Access repository", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in the following thread. If you have questions about dataset versions and allowed uses, please also ask them in the dataset’s community discussions. We will also notify users via email when the latest usable version changes. 3. To host, share, or otherwise provide access to The Stack dataset, you must include these Terms of Use and require users to agree to it.
[ "# Dataset Card for The Stack YAML K8s\n\nThis dataset is a subset of The Stack dataset data/yaml. The YAML files were\nparsed and filtered out all valid K8s YAML files which is what this data is about.\n\nThe dataset contains 276520 valid K8s YAML files. The dataset was created by running\nthe URL\nNotebook on K8s using URL\n\nSource code used to generate dataset: URL\n\nNeed some help? Questions? Join our Discord server: <a href=\"URL alt=\"discord-invite\" src=\"URL", "### How to use it", "## Original The Stack Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard: N/A\n- Point of Contact: contact@URL", "## Dataset Structure", "### Data Instances\nEach data instance corresponds to one file. The content of the file is in the 'content' feature, and other features ('repository_name', 'licenses', etc.) provide some metadata. Note that a given file can appear in several different repositories that satisfy our safe-license criterion. If that is the case, only the first – in alphabetical order -- of these repositories is shown for simplicity.", "### Data Fields\n- 'content' (string): the content of the file.\n- 'size' (integer): size of the uncompressed file.\n- 'lang' (string): the programming language. \n- 'ext' (string): file extension\n- 'avg_line_length' (float): the average line-length of the file.\n- 'max_line_length' (integer): the maximum line-length of the file.\n- 'alphanum_fraction' (float): the fraction of characters in the file that are alphabetical or numerical characters.\n- 'hexsha' (string): unique git hash of file\n- 'max_{stars|forks|issues}_repo_path' (string): path to file in repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_name' (string): name of repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_head_hexsha' (string): hexsha of repository head\n- 'max_{stars|forks|issues}_repo_licenses' (string): licenses in repository \n- 'max_{stars|forks|issues}_count' (integer): number of '{stars|forks|issues}' in repository\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_min_datetime' (string): first timestamp of a '{stars|forks|issues}' event\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_max_datetime' (string): last timestamp of a '{stars|forks|issues}' event", "### Data Splits\n\nThe dataset has no splits and all data is loaded as train split by default. If you want to setup a custom train-test split beware that dataset contains a lot of near-duplicates which can cause leakage into the test split.", "## Dataset Creation", "### Curation Rationale\nOne of the challenges faced by researchers working on code LLMs is the lack of openness and transparency around the development of these systems. Most prior works described the high-level data collection process but did not release the training data. It is therefore difficult for other researchers to fully reproduce these models and understand what kind of pre-training data leads to high-performing code LLMs. By releasing an open large-scale code dataset we hope to make training of code LLMs more reproducible.", "### Source Data", "#### Initial Data Collection and Normalization\n220.92M active GitHub repository names were collected from the event archives published between January 1st, 2015 and March 31st, 2022 on GHArchive. Only 137.36M of these repositories were public and accessible on GitHub – others were not accessible as they had been deleted by their owners. 
51.76B files were downloaded from the public repositories on GitHub between November 2021 and June 2022. 5.28B files were unique. The uncompressed size of all stored files is 92.36TB. \n\nThe list of programming language extensions is taken from this list (also provided in Appendix C of the paper).\n\nNear-deduplication was implemented in the pre-processing pipeline on top of exact deduplication. To find near-duplicates, MinHash with 256 permutations of all documents was computed in linear time. Locality Sensitive Hashing was used to find the clusters of duplicates. Jaccard Similarities were computed inside these clusters to remove any false positives and with a similarity threshold of 0.85. Roughly 40% of permissively licensed files were (near-)duplicates. See section 3 of the paper for further details.\n\nThe following are not stored:\n- Files that cannot contribute to training code: binary, empty, could not be decoded\n- Files larger than 1MB \n- The excluded file extensions are listed in Appendix B of the paper.", "##### License detection \nPermissive licenses have minimal restrictions on how the software can be copied, modified, and redistributed. The full list of licenses can be found here.\n\nGHArchive contained the license information for approximately 12% of the collected repositories. For the remaining repositories, go-license-detector was run to detect the most likely SPDX license identifier. The detector did not detect a license for ~81% of the repositories, in which case the repository was excluded from the dataset. \n\nA file was included in the safe license dataset if at least one of the repositories containing the file had a permissive license.", "#### Who are the source language producers?\n\nThe source (code) language producers are users of GitHub that created unique repository names between January 1st, 2015, and March 31st, 2022.", "### Personal and Sensitive Information\nThe released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub. Deduplication has helped to reduce the amount of sensitive data that may exist. In the event that the dataset contains personal information, researchers should only use public, non-personal information in support of conducting and publishing their open-access research. Personal information should not be used for spamming purposes, including sending unsolicited emails or selling of personal information. Complaints, removal requests, and \"do not contact\" requests can be sent to contact@URL.\n\nThe PII pipeline for this dataset is still a work in progress (see this issue for updates). Researchers that wish to contribute to the anonymization pipeline of the project can apply to join here. Developers with source code in the dataset can request to have it removed here (proof of code contribution is required).", "### Opting out of The Stack\n\nWe are giving developers the ability to have their code removed from the dataset upon request. The process for submitting and enacting removal requests will keep evolving throughout the project as we receive feedback and build up more data governance tools.\n\nYou can check if your code is in The Stack with the following \"Am I In The Stack?\" Space. If you'd like to have your data removed from the dataset follow the instructions on GitHub.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThe Stack is an output of the BigCode Project. 
BigCode aims to be responsible by design and by default. The project is conducted in the spirit of Open Science, focused on the responsible development of LLMs for code.\n\nWith the release of The Stack, we aim to increase access, reproducibility, and transparency of code LLMs in the research community. Work to de-risk and improve on the implementation of ethical best practices of code LLMs is conducted in various BigCode working groups. The Legal, Ethics, and Governance working group has explored topics such as licensing (including copyleft and the intended use of permissively licensed code), attribution of generated code to original code, rights to restrict processing, the inclusion of Personally Identifiable Information (PII), and risks of malicious code, among other topics. This work is ongoing as of October 25th, 2022.\n\nWe expect code LLMs to enable people from diverse backgrounds to write higher quality code and develop low-code applications. Mission-critical software could become easier to maintain as professional developers are guided by code-generating systems on how to write more robust and efficient code. While the social impact is intended to be positive, the increased accessibility of code LLMs comes with certain risks such as over-reliance on the generated code and long-term effects on the software development job market.\n\nA broader impact analysis relating to Code LLMs can be found in section 7 of this paper. An in-depth risk assessments for Code LLMs can be found in section 4 of this paper.", "### Discussion of Biases\nThe code collected from GitHub does not contain demographic information or proxy information about the demographics. However, it is not without risks,\nas the comments within the code may contain harmful or offensive language, which could be learned by the models. \n\nWidely adopted programming languages like C and Javascript are overrepresented compared to niche programming languages like Julia and Scala. Some programming languages such as SQL, Batchfile, TypeScript are less likely to be permissively licensed (4% vs the average 10%). This may result in a biased representation of those languages. Permissively licensed files also tend to be longer.\n\nRoughly 40 natural languages are present in docstrings and comments with English being the most prevalent. In python files, it makes up ~96% of the dataset.\n\nFor further information on data analysis of the Stack, see this repo.", "### Other Known Limitations\n\nOne of the current limitations of The Stack is that scraped HTML for websites may not be compliant with Web Content Accessibility Guidelines (WCAG). This could have an impact on HTML-generated code that may introduce web accessibility issues.\n\nThe training dataset could contain malicious code and/or the model could be used to generate malware or ransomware. \n\nTo the best of our knowledge, all files contained in the dataset are licensed with one of the permissive licenses (see list in Licensing information). The accuracy of license attribution is limited by the accuracy of GHArchive and go-license-detector. Any mistakes should be reported to BigCode Project for review and follow-up as needed.", "## Additional Information", "### Dataset Curators\n1. Harm de Vries, ServiceNow Research, harm.devries@URL\n2. Leandro von Werra, Hugging Face, leandro@URL", "### Licensing Information\nThe Stack is a collection of source code from repositories with various licenses. 
Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n\nThe list of SPDX license identifiers included in the dataset can be found here.", "## Terms of Use for The Stack\n\n The Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset:\n 1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n 2. The Stack is regularly updated to enact validated data removal requests. By clicking on \"Access repository\", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in the following thread. If you have questions about dataset versions and allowed uses, please also ask them in the dataset’s community discussions. We will also notify users via email when the latest usable version changes.\n 3. To host, share, or otherwise provide access to The Stack dataset, you must include these Terms of Use and require users to agree to it." ]
[ "TAGS\n#task_categories-text-generation #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-unknown #language-code #license-other #arxiv-2211.15533 #arxiv-2107.03374 #arxiv-2207.14157 #region-us \n", "# Dataset Card for The Stack YAML K8s\n\nThis dataset is a subset of The Stack dataset data/yaml. The YAML files were\nparsed and filtered out all valid K8s YAML files which is what this data is about.\n\nThe dataset contains 276520 valid K8s YAML files. The dataset was created by running\nthe URL\nNotebook on K8s using URL\n\nSource code used to generate dataset: URL\n\nNeed some help? Questions? Join our Discord server: <a href=\"URL alt=\"discord-invite\" src=\"URL", "### How to use it", "## Original The Stack Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard: N/A\n- Point of Contact: contact@URL", "## Dataset Structure", "### Data Instances\nEach data instance corresponds to one file. The content of the file is in the 'content' feature, and other features ('repository_name', 'licenses', etc.) provide some metadata. Note that a given file can appear in several different repositories that satisfy our safe-license criterion. If that is the case, only the first – in alphabetical order -- of these repositories is shown for simplicity.", "### Data Fields\n- 'content' (string): the content of the file.\n- 'size' (integer): size of the uncompressed file.\n- 'lang' (string): the programming language. \n- 'ext' (string): file extension\n- 'avg_line_length' (float): the average line-length of the file.\n- 'max_line_length' (integer): the maximum line-length of the file.\n- 'alphanum_fraction' (float): the fraction of characters in the file that are alphabetical or numerical characters.\n- 'hexsha' (string): unique git hash of file\n- 'max_{stars|forks|issues}_repo_path' (string): path to file in repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_name' (string): name of repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_head_hexsha' (string): hexsha of repository head\n- 'max_{stars|forks|issues}_repo_licenses' (string): licenses in repository \n- 'max_{stars|forks|issues}_count' (integer): number of '{stars|forks|issues}' in repository\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_min_datetime' (string): first timestamp of a '{stars|forks|issues}' event\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_max_datetime' (string): last timestamp of a '{stars|forks|issues}' event", "### Data Splits\n\nThe dataset has no splits and all data is loaded as train split by default. If you want to setup a custom train-test split beware that dataset contains a lot of near-duplicates which can cause leakage into the test split.", "## Dataset Creation", "### Curation Rationale\nOne of the challenges faced by researchers working on code LLMs is the lack of openness and transparency around the development of these systems. Most prior works described the high-level data collection process but did not release the training data. It is therefore difficult for other researchers to fully reproduce these models and understand what kind of pre-training data leads to high-performing code LLMs. 
By releasing an open large-scale code dataset we hope to make training of code LLMs more reproducible.", "### Source Data", "#### Initial Data Collection and Normalization\n220.92M active GitHub repository names were collected from the event archives published between January 1st, 2015 and March 31st, 2022 on GHArchive. Only 137.36M of these repositories were public and accessible on GitHub – others were not accessible as they had been deleted by their owners. 51.76B files were downloaded from the public repositories on GitHub between November 2021 and June 2022. 5.28B files were unique. The uncompressed size of all stored files is 92.36TB. \n\nThe list of programming language extensions is taken from this list (also provided in Appendix C of the paper).\n\nNear-deduplication was implemented in the pre-processing pipeline on top of exact deduplication. To find near-duplicates, MinHash with 256 permutations of all documents was computed in linear time. Locality Sensitive Hashing was used to find the clusters of duplicates. Jaccard Similarities were computed inside these clusters to remove any false positives and with a similarity threshold of 0.85. Roughly 40% of permissively licensed files were (near-)duplicates. See section 3 of the paper for further details.\n\nThe following are not stored:\n- Files that cannot contribute to training code: binary, empty, could not be decoded\n- Files larger than 1MB \n- The excluded file extensions are listed in Appendix B of the paper.", "##### License detection \nPermissive licenses have minimal restrictions on how the software can be copied, modified, and redistributed. The full list of licenses can be found here.\n\nGHArchive contained the license information for approximately 12% of the collected repositories. For the remaining repositories, go-license-detector was run to detect the most likely SPDX license identifier. The detector did not detect a license for ~81% of the repositories, in which case the repository was excluded from the dataset. \n\nA file was included in the safe license dataset if at least one of the repositories containing the file had a permissive license.", "#### Who are the source language producers?\n\nThe source (code) language producers are users of GitHub that created unique repository names between January 1st, 2015, and March 31st, 2022.", "### Personal and Sensitive Information\nThe released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub. Deduplication has helped to reduce the amount of sensitive data that may exist. In the event that the dataset contains personal information, researchers should only use public, non-personal information in support of conducting and publishing their open-access research. Personal information should not be used for spamming purposes, including sending unsolicited emails or selling of personal information. Complaints, removal requests, and \"do not contact\" requests can be sent to contact@URL.\n\nThe PII pipeline for this dataset is still a work in progress (see this issue for updates). Researchers that wish to contribute to the anonymization pipeline of the project can apply to join here. Developers with source code in the dataset can request to have it removed here (proof of code contribution is required).", "### Opting out of The Stack\n\nWe are giving developers the ability to have their code removed from the dataset upon request. 
The process for submitting and enacting removal requests will keep evolving throughout the project as we receive feedback and build up more data governance tools.\n\nYou can check if your code is in The Stack with the following \"Am I In The Stack?\" Space. If you'd like to have your data removed from the dataset follow the instructions on GitHub.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThe Stack is an output of the BigCode Project. BigCode aims to be responsible by design and by default. The project is conducted in the spirit of Open Science, focused on the responsible development of LLMs for code.\n\nWith the release of The Stack, we aim to increase access, reproducibility, and transparency of code LLMs in the research community. Work to de-risk and improve on the implementation of ethical best practices of code LLMs is conducted in various BigCode working groups. The Legal, Ethics, and Governance working group has explored topics such as licensing (including copyleft and the intended use of permissively licensed code), attribution of generated code to original code, rights to restrict processing, the inclusion of Personally Identifiable Information (PII), and risks of malicious code, among other topics. This work is ongoing as of October 25th, 2022.\n\nWe expect code LLMs to enable people from diverse backgrounds to write higher quality code and develop low-code applications. Mission-critical software could become easier to maintain as professional developers are guided by code-generating systems on how to write more robust and efficient code. While the social impact is intended to be positive, the increased accessibility of code LLMs comes with certain risks such as over-reliance on the generated code and long-term effects on the software development job market.\n\nA broader impact analysis relating to Code LLMs can be found in section 7 of this paper. An in-depth risk assessments for Code LLMs can be found in section 4 of this paper.", "### Discussion of Biases\nThe code collected from GitHub does not contain demographic information or proxy information about the demographics. However, it is not without risks,\nas the comments within the code may contain harmful or offensive language, which could be learned by the models. \n\nWidely adopted programming languages like C and Javascript are overrepresented compared to niche programming languages like Julia and Scala. Some programming languages such as SQL, Batchfile, TypeScript are less likely to be permissively licensed (4% vs the average 10%). This may result in a biased representation of those languages. Permissively licensed files also tend to be longer.\n\nRoughly 40 natural languages are present in docstrings and comments with English being the most prevalent. In python files, it makes up ~96% of the dataset.\n\nFor further information on data analysis of the Stack, see this repo.", "### Other Known Limitations\n\nOne of the current limitations of The Stack is that scraped HTML for websites may not be compliant with Web Content Accessibility Guidelines (WCAG). This could have an impact on HTML-generated code that may introduce web accessibility issues.\n\nThe training dataset could contain malicious code and/or the model could be used to generate malware or ransomware. \n\nTo the best of our knowledge, all files contained in the dataset are licensed with one of the permissive licenses (see list in Licensing information). 
The accuracy of license attribution is limited by the accuracy of GHArchive and go-license-detector. Any mistakes should be reported to BigCode Project for review and follow-up as needed.", "## Additional Information", "### Dataset Curators\n1. Harm de Vries, ServiceNow Research, harm.devries@URL\n2. Leandro von Werra, Hugging Face, leandro@URL", "### Licensing Information\nThe Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n\nThe list of SPDX license identifiers included in the dataset can be found here.", "## Terms of Use for The Stack\n\n The Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset:\n 1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n 2. The Stack is regularly updated to enact validated data removal requests. By clicking on \"Access repository\", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in the following thread. If you have questions about dataset versions and allowed uses, please also ask them in the dataset’s community discussions. We will also notify users via email when the latest usable version changes.\n 3. To host, share, or otherwise provide access to The Stack dataset, you must include these Terms of Use and require users to agree to it." ]
[ 90, 130, 6, 37, 6, 104, 459, 61, 5, 121, 4, 343, 153, 44, 218, 108, 8, 364, 209, 171, 5, 39, 93, 246 ]
[ "passage: TAGS\n#task_categories-text-generation #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-unknown #language-code #license-other #arxiv-2211.15533 #arxiv-2107.03374 #arxiv-2207.14157 #region-us \n# Dataset Card for The Stack YAML K8s\n\nThis dataset is a subset of The Stack dataset data/yaml. The YAML files were\nparsed and filtered out all valid K8s YAML files which is what this data is about.\n\nThe dataset contains 276520 valid K8s YAML files. The dataset was created by running\nthe URL\nNotebook on K8s using URL\n\nSource code used to generate dataset: URL\n\nNeed some help? Questions? Join our Discord server: <a href=\"URL alt=\"discord-invite\" src=\"URL### How to use it## Original The Stack Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard: N/A\n- Point of Contact: contact@URL## Dataset Structure### Data Instances\nEach data instance corresponds to one file. The content of the file is in the 'content' feature, and other features ('repository_name', 'licenses', etc.) provide some metadata. Note that a given file can appear in several different repositories that satisfy our safe-license criterion. If that is the case, only the first – in alphabetical order -- of these repositories is shown for simplicity.", "passage: ### Data Fields\n- 'content' (string): the content of the file.\n- 'size' (integer): size of the uncompressed file.\n- 'lang' (string): the programming language. \n- 'ext' (string): file extension\n- 'avg_line_length' (float): the average line-length of the file.\n- 'max_line_length' (integer): the maximum line-length of the file.\n- 'alphanum_fraction' (float): the fraction of characters in the file that are alphabetical or numerical characters.\n- 'hexsha' (string): unique git hash of file\n- 'max_{stars|forks|issues}_repo_path' (string): path to file in repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_name' (string): name of repo containing this file with maximum number of '{stars|forks|issues}'\n- 'max_{stars|forks|issues}_repo_head_hexsha' (string): hexsha of repository head\n- 'max_{stars|forks|issues}_repo_licenses' (string): licenses in repository \n- 'max_{stars|forks|issues}_count' (integer): number of '{stars|forks|issues}' in repository\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_min_datetime' (string): first timestamp of a '{stars|forks|issues}' event\n- 'max_{stars|forks|issues}_repo_{stars|forks|issues}_max_datetime' (string): last timestamp of a '{stars|forks|issues}' event### Data Splits\n\nThe dataset has no splits and all data is loaded as train split by default. If you want to setup a custom train-test split beware that dataset contains a lot of near-duplicates which can cause leakage into the test split.## Dataset Creation### Curation Rationale\nOne of the challenges faced by researchers working on code LLMs is the lack of openness and transparency around the development of these systems. Most prior works described the high-level data collection process but did not release the training data. It is therefore difficult for other researchers to fully reproduce these models and understand what kind of pre-training data leads to high-performing code LLMs. 
By releasing an open large-scale code dataset we hope to make training of code LLMs more reproducible.### Source Data", "passage: #### Initial Data Collection and Normalization\n220.92M active GitHub repository names were collected from the event archives published between January 1st, 2015 and March 31st, 2022 on GHArchive. Only 137.36M of these repositories were public and accessible on GitHub – others were not accessible as they had been deleted by their owners. 51.76B files were downloaded from the public repositories on GitHub between November 2021 and June 2022. 5.28B files were unique. The uncompressed size of all stored files is 92.36TB. \n\nThe list of programming language extensions is taken from this list (also provided in Appendix C of the paper).\n\nNear-deduplication was implemented in the pre-processing pipeline on top of exact deduplication. To find near-duplicates, MinHash with 256 permutations of all documents was computed in linear time. Locality Sensitive Hashing was used to find the clusters of duplicates. Jaccard Similarities were computed inside these clusters to remove any false positives and with a similarity threshold of 0.85. Roughly 40% of permissively licensed files were (near-)duplicates. See section 3 of the paper for further details.\n\nThe following are not stored:\n- Files that cannot contribute to training code: binary, empty, could not be decoded\n- Files larger than 1MB \n- The excluded file extensions are listed in Appendix B of the paper.##### License detection \nPermissive licenses have minimal restrictions on how the software can be copied, modified, and redistributed. The full list of licenses can be found here.\n\nGHArchive contained the license information for approximately 12% of the collected repositories. For the remaining repositories, go-license-detector was run to detect the most likely SPDX license identifier. The detector did not detect a license for ~81% of the repositories, in which case the repository was excluded from the dataset. \n\nA file was included in the safe license dataset if at least one of the repositories containing the file had a permissive license.#### Who are the source language producers?\n\nThe source (code) language producers are users of GitHub that created unique repository names between January 1st, 2015, and March 31st, 2022.### Personal and Sensitive Information\nThe released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub. Deduplication has helped to reduce the amount of sensitive data that may exist. In the event that the dataset contains personal information, researchers should only use public, non-personal information in support of conducting and publishing their open-access research. Personal information should not be used for spamming purposes, including sending unsolicited emails or selling of personal information. Complaints, removal requests, and \"do not contact\" requests can be sent to contact@URL.\n\nThe PII pipeline for this dataset is still a work in progress (see this issue for updates). Researchers that wish to contribute to the anonymization pipeline of the project can apply to join here. Developers with source code in the dataset can request to have it removed here (proof of code contribution is required).", "passage: ### Opting out of The Stack\n\nWe are giving developers the ability to have their code removed from the dataset upon request. 
The process for submitting and enacting removal requests will keep evolving throughout the project as we receive feedback and build up more data governance tools.\n\nYou can check if your code is in The Stack with the following \"Am I In The Stack?\" Space. If you'd like to have your data removed from the dataset follow the instructions on GitHub.## Considerations for Using the Data### Social Impact of Dataset\n\nThe Stack is an output of the BigCode Project. BigCode aims to be responsible by design and by default. The project is conducted in the spirit of Open Science, focused on the responsible development of LLMs for code.\n\nWith the release of The Stack, we aim to increase access, reproducibility, and transparency of code LLMs in the research community. Work to de-risk and improve on the implementation of ethical best practices of code LLMs is conducted in various BigCode working groups. The Legal, Ethics, and Governance working group has explored topics such as licensing (including copyleft and the intended use of permissively licensed code), attribution of generated code to original code, rights to restrict processing, the inclusion of Personally Identifiable Information (PII), and risks of malicious code, among other topics. This work is ongoing as of October 25th, 2022.\n\nWe expect code LLMs to enable people from diverse backgrounds to write higher quality code and develop low-code applications. Mission-critical software could become easier to maintain as professional developers are guided by code-generating systems on how to write more robust and efficient code. While the social impact is intended to be positive, the increased accessibility of code LLMs comes with certain risks such as over-reliance on the generated code and long-term effects on the software development job market.\n\nA broader impact analysis relating to Code LLMs can be found in section 7 of this paper. An in-depth risk assessment for Code LLMs can be found in section 4 of this paper.", "passage: ### Discussion of Biases\nThe code collected from GitHub does not contain demographic information or proxy information about the demographics. However, it is not without risks,\nas the comments within the code may contain harmful or offensive language, which could be learned by the models. \n\nWidely adopted programming languages like C and Javascript are overrepresented compared to niche programming languages like Julia and Scala. Some programming languages such as SQL, Batchfile, TypeScript are less likely to be permissively licensed (4% vs the average 10%). This may result in a biased representation of those languages. Permissively licensed files also tend to be longer.\n\nRoughly 40 natural languages are present in docstrings and comments with English being the most prevalent. In python files, it makes up ~96% of the dataset.\n\nFor further information on data analysis of the Stack, see this repo.### Other Known Limitations\n\nOne of the current limitations of The Stack is that scraped HTML for websites may not be compliant with Web Content Accessibility Guidelines (WCAG). This could have an impact on HTML-generated code that may introduce web accessibility issues.\n\nThe training dataset could contain malicious code and/or the model could be used to generate malware or ransomware. \n\nTo the best of our knowledge, all files contained in the dataset are licensed with one of the permissive licenses (see list in Licensing information). 
The accuracy of license attribution is limited by the accuracy of GHArchive and go-license-detector. Any mistakes should be reported to BigCode Project for review and follow-up as needed.## Additional Information### Dataset Curators\n1. Harm de Vries, ServiceNow Research, harm.devries@URL\n2. Leandro von Werra, Hugging Face, leandro@URL### Licensing Information\nThe Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n\nThe list of SPDX license identifiers included in the dataset can be found here." ]
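The near-deduplication recipe described in the passages above (MinHash signatures with 256 permutations, Locality Sensitive Hashing to cluster candidate duplicates, and a 0.85 Jaccard threshold) is easy to prototype. Below is a minimal sketch using the `datasketch` library; the documents, the whitespace tokenization, and all names are illustrative and are not The Stack's actual pipeline code:

```python
# Minimal sketch of the near-dedup recipe described above: MinHash with
# 256 permutations plus LSH at a 0.85 Jaccard threshold (datasketch library).
# The documents and the crude whitespace shingling are illustrative only.
from datasketch import MinHash, MinHashLSH

NUM_PERM = 256     # number of MinHash permutations, as stated in the card
THRESHOLD = 0.85   # Jaccard similarity threshold, as stated in the card

def minhash_of(text: str) -> MinHash:
    m = MinHash(num_perm=NUM_PERM)
    for token in text.split():  # simple whitespace shingling for the sketch
        m.update(token.encode("utf-8"))
    return m

docs = {
    "a.yaml": "apiVersion: v1 kind: Pod metadata: name: web labels: app: web "
              "spec: containers: - name: web image: nginx:1.25 ports: - containerPort: 80",
    "b.yaml": "apiVersion: v1 kind: Pod metadata: name: web labels: app: web "
              "spec: containers: - name: web image: nginx:1.24 ports: - containerPort: 80",
    "c.yaml": "apiVersion: apps/v1 kind: Deployment metadata: name: api",
}

lsh = MinHashLSH(threshold=THRESHOLD, num_perm=NUM_PERM)
kept = []
for key, text in docs.items():
    mh = minhash_of(text)
    if lsh.query(mh):   # a previously kept near-duplicate already exists
        continue
    lsh.insert(key, mh)
    kept.append(key)

print(kept)  # b.yaml is a near-duplicate of a.yaml and is likely filtered out
```

As the card notes, the real pipeline then recomputes exact Jaccard similarities inside each LSH cluster to remove false positives before dropping duplicates.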
86788fa7d198001455d0ce7da34c7fc4c7c92ab3
# AutoTrain Dataset for project: hanzbydarcycao

## Dataset Description

This dataset has been automatically processed by AutoTrain for project hanzbydarcycao.

### Languages

The BCP-47 code for the dataset's language is zh2en.

## Dataset Structure

### Data Instances

A sample from this dataset looks as follows:

```json
[
  {
    "source": "sarashi",
    "target": "sarashi"
  },
  {
    "source": "Dojo",
    "target": "Dojo"
  }
]
```

### Dataset Fields

The dataset has the following fields (also called "features"):

```json
{
  "source": "Value(dtype='string', id=None)",
  "target": "Value(dtype='string', id=None)"
}
```

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:

| Split name   | Num samples         |
| ------------ | ------------------- |
| train        | 98                  |
| valid        | 25                  |
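A minimal usage sketch for this card (a hedged example, assuming the repo id `darcy01/autotrain-data-hanzbydarcycao` recorded for this dataset and the split names listed in the table above):

```python
# Sketch: load the AutoTrain dataset and read one source/target pair.
# Repo id and split names are taken from this card; adjust if the
# repository layout differs.
from datasets import load_dataset

ds = load_dataset("darcy01/autotrain-data-hanzbydarcycao")
print(ds)                                    # expect 'train' (98) and 'valid' (25)
pair = ds["train"][0]
print(pair["source"], "->", pair["target"])
```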
darcy01/autotrain-data-hanzbydarcycao
[ "task_categories:translation", "language:zh", "language:en", "region:us" ]
2023-10-09T05:10:02+00:00
{"language": ["zh", "en"], "task_categories": ["translation"]}
2023-10-09T05:13:45+00:00
[]
[ "zh", "en" ]
TAGS #task_categories-translation #language-Chinese #language-English #region-us
AutoTrain Dataset for project: hanzbydarcycao
=============================================

Dataset Description
-------------------

This dataset has been automatically processed by AutoTrain for project hanzbydarcycao.

### Languages

The BCP-47 code for the dataset's language is zh2en.

Dataset Structure
-----------------

### Data Instances

A sample from this dataset looks as follows:

### Dataset Fields

The dataset has the following fields (also called "features"):

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-translation #language-Chinese #language-English #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ 24, 29, 17, 23, 27 ]
[ "passage: TAGS\n#task_categories-translation #language-Chinese #language-English #region-us \n### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nA sample from this dataset looks as follows:### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
ff6c71e8baf0ac92ddf70a8f520494d09d6a919b
# AutoTrain Dataset for project: hanz_en2zh

## Dataset Description

This dataset has been automatically processed by AutoTrain for project hanz_en2zh.

### Languages

The BCP-47 code for the dataset's language is zh2en.

## Dataset Structure

### Data Instances

A sample from this dataset looks as follows:

```json
[
  {
    "source": "sarashi",
    "target": "sarashi"
  },
  {
    "source": "Dojo",
    "target": "Dojo"
  }
]
```

### Dataset Fields

The dataset has the following fields (also called "features"):

```json
{
  "source": "Value(dtype='string', id=None)",
  "target": "Value(dtype='string', id=None)"
}
```

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:

| Split name   | Num samples         |
| ------------ | ------------------- |
| train        | 98                  |
| valid        | 25                  |
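The sample rows above have identical `source` and `target` strings ("sarashi" -> "sarashi", "Dojo" -> "Dojo"), which is worth quantifying before fine-tuning a zh/en translation model on this data. A sketch, assuming the repo id `darcycao/autotrain-data-hanz_en2zh` recorded for this card and the same two-field schema:

```python
# Sketch: count rows whose source equals target, then drop them.
# Repo id and schema come from this card; adjust if the layout differs.
from datasets import load_dataset

ds = load_dataset("darcycao/autotrain-data-hanz_en2zh", split="train")
identical = ds.filter(lambda ex: ex["source"].strip() == ex["target"].strip())
print(f"{len(identical)} of {len(ds)} rows have source == target")
cleaned = ds.filter(lambda ex: ex["source"].strip() != ex["target"].strip())
print(f"{len(cleaned)} rows remain for training")
```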
darcycao/autotrain-data-hanz_en2zh
[ "task_categories:translation", "language:zh", "language:en", "region:us" ]
2023-10-09T05:23:22+00:00
{"language": ["zh", "en"], "task_categories": ["translation"]}
2023-10-09T09:13:20+00:00
[]
[ "zh", "en" ]
TAGS #task_categories-translation #language-Chinese #language-English #region-us
AutoTrain Dataset for project: hanz\_en2zh
==========================================

Dataset Description
-------------------

This dataset has been automatically processed by AutoTrain for project hanz\_en2zh.

### Languages

The BCP-47 code for the dataset's language is zh2en.

Dataset Structure
-----------------

### Data Instances

A sample from this dataset looks as follows:

### Dataset Fields

The dataset has the following fields (also called "features"):

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-translation #language-Chinese #language-English #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ 24, 29, 17, 23, 27 ]
[ "passage: TAGS\n#task_categories-translation #language-Chinese #language-English #region-us \n### Languages\n\n\nThe BCP-47 code for the dataset's language is zh2en.\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nA sample from this dataset looks as follows:### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
83ad0a6b7411a967f045fb3250c93066f3c58631
# Dataset Card for "flowers_seed_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/flowers_seed_prompts
[ "region:us" ]
2023-10-09T05:38:57+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 89719, "num_examples": 1000}], "download_size": 2001, "dataset_size": 89719}}
2023-10-09T05:38:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "flowers_seed_prompts" More Information needed
[ "# Dataset Card for \"flowers_seed_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"flowers_seed_prompts\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"flowers_seed_prompts\"\n\nMore Information needed" ]
b129f741508a416bd622477dc901dccc59beb3da
# Dataset Card for "ShareGPT_V3_unfiltered_cleaned_small_9k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mychen76/ShareGPT_V3_unfiltered_cleaned_small_9k
[ "region:us" ]
2023-10-09T05:49:29+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "markdown", "struct": [{"name": "answer", "dtype": "string"}, {"name": "index", "dtype": "int64"}, {"name": "type", "dtype": "string"}]}, {"name": "text", "dtype": "string"}, {"name": "value", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 57188795.51333581, "num_examples": 8473}, {"name": "test", "num_bytes": 6358060.35330607, "num_examples": 942}, {"name": "valid", "num_bytes": 641205.6619576185, "num_examples": 95}], "download_size": 28307098, "dataset_size": 64188061.5285995}}
2023-10-09T05:56:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ShareGPT_V3_unfiltered_cleaned_small_9k" More Information needed
[ "# Dataset Card for \"ShareGPT_V3_unfiltered_cleaned_small_9k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ShareGPT_V3_unfiltered_cleaned_small_9k\"\n\nMore Information needed" ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ShareGPT_V3_unfiltered_cleaned_small_9k\"\n\nMore Information needed" ]
2abeed072590ca5f8241a554c73ad943a814caf7
# Dataset Card for Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/krevas/LDCC-Instruct-Llama-2-ko-13B
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [krevas/LDCC-Instruct-Llama-2-ko-13B](https://huggingface.co/krevas/LDCC-Instruct-Llama-2-ko-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 61 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B",
	"harness_truthfulqa_mc_0",
	split="train")
```

## Latest results

These are the [latest results from run 2023-10-09T06:55:19.126017](https://huggingface.co/datasets/open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B/blob/main/results_2023-10-09T06-55-19.126017.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "acc": 0.5140887884293746,
        "acc_stderr": 0.034831195333324204,
        "acc_norm": 0.5180581384469735,
        "acc_norm_stderr": 0.03481277047428223,
        "mc1": 0.26193390452876375,
        "mc1_stderr": 0.01539211880501503,
        "mc2": 0.37999611805412853,
        "mc2_stderr": 0.013428724763055466
    },
    "harness|arc:challenge|25": {
        "acc": 0.5392491467576792,
        "acc_stderr": 0.014566303676636588,
        "acc_norm": 0.5674061433447098,
        "acc_norm_stderr": 0.014478005694182526
    },
    "harness|hellaswag|10": {
        "acc": 0.6096395140410277,
        "acc_stderr": 0.004868341056566223,
        "acc_norm": 0.8156741684923322,
        "acc_norm_stderr": 0.0038695723555438196
    },
    "harness|hendrycksTest-abstract_algebra|5": {
        "acc": 0.32,
        "acc_stderr": 0.046882617226215034,
        "acc_norm": 0.32,
        "acc_norm_stderr": 0.046882617226215034
    },
    "harness|hendrycksTest-anatomy|5": {
        "acc": 0.4666666666666667,
        "acc_stderr": 0.043097329010363554,
        "acc_norm": 0.4666666666666667,
        "acc_norm_stderr": 0.043097329010363554
    },
    "harness|hendrycksTest-astronomy|5": {
        "acc": 0.5197368421052632,
        "acc_stderr": 0.04065771002562605,
        "acc_norm": 0.5197368421052632,
        "acc_norm_stderr": 0.04065771002562605
    },
    "harness|hendrycksTest-business_ethics|5": {
        "acc": 0.52,
        "acc_stderr": 0.050211673156867795,
        "acc_norm": 0.52,
        "acc_norm_stderr": 0.050211673156867795
    },
    "harness|hendrycksTest-clinical_knowledge|5": {
        "acc": 0.5358490566037736,
        "acc_stderr": 0.030693675018458003,
        "acc_norm": 0.5358490566037736,
        "acc_norm_stderr": 0.030693675018458003
    },
    "harness|hendrycksTest-college_biology|5": {
        "acc": 0.5833333333333334,
        "acc_stderr": 0.04122728707651282,
        "acc_norm": 0.5833333333333334,
        "acc_norm_stderr": 0.04122728707651282
    },
    "harness|hendrycksTest-college_chemistry|5": {
        "acc": 0.34,
        "acc_stderr": 
0.04760952285695236, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695236 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.39, "acc_stderr": 0.04902071300001974, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001974 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.44508670520231214, "acc_stderr": 0.03789401760283647, "acc_norm": 0.44508670520231214, "acc_norm_stderr": 0.03789401760283647 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.23529411764705882, "acc_stderr": 0.042207736591714506, "acc_norm": 0.23529411764705882, "acc_norm_stderr": 0.042207736591714506 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.67, "acc_stderr": 0.04725815626252607, "acc_norm": 0.67, "acc_norm_stderr": 0.04725815626252607 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.42127659574468085, "acc_stderr": 0.03227834510146268, "acc_norm": 0.42127659574468085, "acc_norm_stderr": 0.03227834510146268 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.30701754385964913, "acc_stderr": 0.0433913832257986, "acc_norm": 0.30701754385964913, "acc_norm_stderr": 0.0433913832257986 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.42758620689655175, "acc_stderr": 0.041227371113703316, "acc_norm": 0.42758620689655175, "acc_norm_stderr": 0.041227371113703316 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.3201058201058201, "acc_stderr": 0.024026846392873506, "acc_norm": 0.3201058201058201, "acc_norm_stderr": 0.024026846392873506 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.30952380952380953, "acc_stderr": 0.04134913018303316, "acc_norm": 0.30952380952380953, "acc_norm_stderr": 0.04134913018303316 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.34, "acc_stderr": 0.04760952285695236, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695236 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.5935483870967742, "acc_stderr": 0.027941727346256304, "acc_norm": 0.5935483870967742, "acc_norm_stderr": 0.027941727346256304 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.3448275862068966, "acc_stderr": 0.03344283744280458, "acc_norm": 0.3448275862068966, "acc_norm_stderr": 0.03344283744280458 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.44, "acc_stderr": 0.04988876515698589, "acc_norm": 0.44, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.6303030303030303, "acc_stderr": 0.03769430314512567, "acc_norm": 0.6303030303030303, "acc_norm_stderr": 0.03769430314512567 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.6262626262626263, "acc_stderr": 0.034468977386593325, "acc_norm": 0.6262626262626263, "acc_norm_stderr": 0.034468977386593325 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.7305699481865285, "acc_stderr": 0.03201867122877794, "acc_norm": 0.7305699481865285, "acc_norm_stderr": 0.03201867122877794 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.4461538461538462, "acc_stderr": 0.02520357177302833, "acc_norm": 0.4461538461538462, "acc_norm_stderr": 0.02520357177302833 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3148148148148148, "acc_stderr": 0.028317533496066485, "acc_norm": 0.3148148148148148, "acc_norm_stderr": 0.028317533496066485 }, 
"harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.5252100840336135, "acc_stderr": 0.0324371805513741, "acc_norm": 0.5252100840336135, "acc_norm_stderr": 0.0324371805513741 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.2582781456953642, "acc_stderr": 0.035737053147634576, "acc_norm": 0.2582781456953642, "acc_norm_stderr": 0.035737053147634576 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.6678899082568808, "acc_stderr": 0.02019268298542333, "acc_norm": 0.6678899082568808, "acc_norm_stderr": 0.02019268298542333 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.35185185185185186, "acc_stderr": 0.032568505702936484, "acc_norm": 0.35185185185185186, "acc_norm_stderr": 0.032568505702936484 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.6666666666666666, "acc_stderr": 0.03308611113236436, "acc_norm": 0.6666666666666666, "acc_norm_stderr": 0.03308611113236436 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.6919831223628692, "acc_stderr": 0.0300523893356057, "acc_norm": 0.6919831223628692, "acc_norm_stderr": 0.0300523893356057 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.600896860986547, "acc_stderr": 0.03286745312567961, "acc_norm": 0.600896860986547, "acc_norm_stderr": 0.03286745312567961 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.5801526717557252, "acc_stderr": 0.043285772152629715, "acc_norm": 0.5801526717557252, "acc_norm_stderr": 0.043285772152629715 }, "harness|hendrycksTest-international_law|5": { "acc": 0.6859504132231405, "acc_stderr": 0.04236964753041018, "acc_norm": 0.6859504132231405, "acc_norm_stderr": 0.04236964753041018 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.6018518518518519, "acc_stderr": 0.04732332615978813, "acc_norm": 0.6018518518518519, "acc_norm_stderr": 0.04732332615978813 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.656441717791411, "acc_stderr": 0.037311335196738925, "acc_norm": 0.656441717791411, "acc_norm_stderr": 0.037311335196738925 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4017857142857143, "acc_stderr": 0.04653333146973646, "acc_norm": 0.4017857142857143, "acc_norm_stderr": 0.04653333146973646 }, "harness|hendrycksTest-management|5": { "acc": 0.6601941747572816, "acc_stderr": 0.046897659372781335, "acc_norm": 0.6601941747572816, "acc_norm_stderr": 0.046897659372781335 }, "harness|hendrycksTest-marketing|5": { "acc": 0.7991452991452992, "acc_stderr": 0.02624677294689048, "acc_norm": 0.7991452991452992, "acc_norm_stderr": 0.02624677294689048 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.54, "acc_stderr": 0.05009082659620332, "acc_norm": 0.54, "acc_norm_stderr": 0.05009082659620332 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7113665389527458, "acc_stderr": 0.016203792703197776, "acc_norm": 0.7113665389527458, "acc_norm_stderr": 0.016203792703197776 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.5520231213872833, "acc_stderr": 0.02677299065336182, "acc_norm": 0.5520231213872833, "acc_norm_stderr": 0.02677299065336182 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.24916201117318434, "acc_stderr": 0.014465893829859924, "acc_norm": 0.24916201117318434, "acc_norm_stderr": 0.014465893829859924 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.5359477124183006, "acc_stderr": 0.02855582751652878, "acc_norm": 0.5359477124183006, "acc_norm_stderr": 0.02855582751652878 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6366559485530546, "acc_stderr": 0.027316847674192703, 
"acc_norm": 0.6366559485530546, "acc_norm_stderr": 0.027316847674192703 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6265432098765432, "acc_stderr": 0.026915003011380157, "acc_norm": 0.6265432098765432, "acc_norm_stderr": 0.026915003011380157 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.3617021276595745, "acc_stderr": 0.028663820147199492, "acc_norm": 0.3617021276595745, "acc_norm_stderr": 0.028663820147199492 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4106910039113429, "acc_stderr": 0.012564871542534353, "acc_norm": 0.4106910039113429, "acc_norm_stderr": 0.012564871542534353 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.4485294117647059, "acc_stderr": 0.0302114796091216, "acc_norm": 0.4485294117647059, "acc_norm_stderr": 0.0302114796091216 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.553921568627451, "acc_stderr": 0.020109864547181357, "acc_norm": 0.553921568627451, "acc_norm_stderr": 0.020109864547181357 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.5909090909090909, "acc_stderr": 0.04709306978661896, "acc_norm": 0.5909090909090909, "acc_norm_stderr": 0.04709306978661896 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.5346938775510204, "acc_stderr": 0.03193207024425314, "acc_norm": 0.5346938775510204, "acc_norm_stderr": 0.03193207024425314 }, "harness|hendrycksTest-sociology|5": { "acc": 0.7064676616915423, "acc_stderr": 0.03220024104534204, "acc_norm": 0.7064676616915423, "acc_norm_stderr": 0.03220024104534204 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.75, "acc_stderr": 0.04351941398892446, "acc_norm": 0.75, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-virology|5": { "acc": 0.463855421686747, "acc_stderr": 0.03882310850890593, "acc_norm": 0.463855421686747, "acc_norm_stderr": 0.03882310850890593 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.783625730994152, "acc_stderr": 0.03158149539338734, "acc_norm": 0.783625730994152, "acc_norm_stderr": 0.03158149539338734 }, "harness|truthfulqa:mc|0": { "mc1": 0.26193390452876375, "mc1_stderr": 0.01539211880501503, "mc2": 0.37999611805412853, "mc2_stderr": 0.013428724763055466 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B
[ "region:us" ]
2023-10-09T05:55:37+00:00
{"pretty_name": "Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B", "dataset_summary": "Dataset automatically created during the evaluation run of model [krevas/LDCC-Instruct-Llama-2-ko-13B](https://huggingface.co/krevas/LDCC-Instruct-Llama-2-ko-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B\",\n\t\"harness_truthfulqa_mc_0\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-09T06:55:19.126017](https://huggingface.co/datasets/open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B/blob/main/results_2023-10-09T06-55-19.126017.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5140887884293746,\n \"acc_stderr\": 0.034831195333324204,\n \"acc_norm\": 0.5180581384469735,\n \"acc_norm_stderr\": 0.03481277047428223,\n \"mc1\": 0.26193390452876375,\n \"mc1_stderr\": 0.01539211880501503,\n \"mc2\": 0.37999611805412853,\n \"mc2_stderr\": 0.013428724763055466\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5392491467576792,\n \"acc_stderr\": 0.014566303676636588,\n \"acc_norm\": 0.5674061433447098,\n \"acc_norm_stderr\": 0.014478005694182526\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6096395140410277,\n \"acc_stderr\": 0.004868341056566223,\n \"acc_norm\": 0.8156741684923322,\n \"acc_norm_stderr\": 0.0038695723555438196\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.046882617226215034,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.046882617226215034\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.4666666666666667,\n \"acc_stderr\": 0.043097329010363554,\n \"acc_norm\": 0.4666666666666667,\n \"acc_norm_stderr\": 0.043097329010363554\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.5197368421052632,\n \"acc_stderr\": 0.04065771002562605,\n \"acc_norm\": 0.5197368421052632,\n \"acc_norm_stderr\": 0.04065771002562605\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.52,\n \"acc_stderr\": 0.050211673156867795,\n \"acc_norm\": 0.52,\n \"acc_norm_stderr\": 0.050211673156867795\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.5358490566037736,\n \"acc_stderr\": 0.030693675018458003,\n \"acc_norm\": 0.5358490566037736,\n \"acc_norm_stderr\": 0.030693675018458003\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.5833333333333334,\n \"acc_stderr\": 0.04122728707651282,\n \"acc_norm\": 0.5833333333333334,\n \"acc_norm_stderr\": 0.04122728707651282\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.34,\n 
\"acc_stderr\": 0.04760952285695236,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695236\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001974,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001974\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.44508670520231214,\n \"acc_stderr\": 0.03789401760283647,\n \"acc_norm\": 0.44508670520231214,\n \"acc_norm_stderr\": 0.03789401760283647\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.23529411764705882,\n \"acc_stderr\": 0.042207736591714506,\n \"acc_norm\": 0.23529411764705882,\n \"acc_norm_stderr\": 0.042207736591714506\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.67,\n \"acc_stderr\": 0.04725815626252607,\n \"acc_norm\": 0.67,\n \"acc_norm_stderr\": 0.04725815626252607\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.42127659574468085,\n \"acc_stderr\": 0.03227834510146268,\n \"acc_norm\": 0.42127659574468085,\n \"acc_norm_stderr\": 0.03227834510146268\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.30701754385964913,\n \"acc_stderr\": 0.0433913832257986,\n \"acc_norm\": 0.30701754385964913,\n \"acc_norm_stderr\": 0.0433913832257986\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.42758620689655175,\n \"acc_stderr\": 0.041227371113703316,\n \"acc_norm\": 0.42758620689655175,\n \"acc_norm_stderr\": 0.041227371113703316\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3201058201058201,\n \"acc_stderr\": 0.024026846392873506,\n \"acc_norm\": 0.3201058201058201,\n \"acc_norm_stderr\": 0.024026846392873506\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.30952380952380953,\n \"acc_stderr\": 0.04134913018303316,\n \"acc_norm\": 0.30952380952380953,\n \"acc_norm_stderr\": 0.04134913018303316\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695236,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695236\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.5935483870967742,\n \"acc_stderr\": 0.027941727346256304,\n \"acc_norm\": 0.5935483870967742,\n \"acc_norm_stderr\": 0.027941727346256304\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.3448275862068966,\n \"acc_stderr\": 0.03344283744280458,\n \"acc_norm\": 0.3448275862068966,\n \"acc_norm_stderr\": 0.03344283744280458\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.44,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.44,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.6303030303030303,\n \"acc_stderr\": 0.03769430314512567,\n \"acc_norm\": 0.6303030303030303,\n \"acc_norm_stderr\": 0.03769430314512567\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.6262626262626263,\n \"acc_stderr\": 0.034468977386593325,\n \"acc_norm\": 0.6262626262626263,\n \"acc_norm_stderr\": 0.034468977386593325\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.7305699481865285,\n \"acc_stderr\": 0.03201867122877794,\n \"acc_norm\": 0.7305699481865285,\n \"acc_norm_stderr\": 0.03201867122877794\n },\n 
\"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.4461538461538462,\n \"acc_stderr\": 0.02520357177302833,\n \"acc_norm\": 0.4461538461538462,\n \"acc_norm_stderr\": 0.02520357177302833\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3148148148148148,\n \"acc_stderr\": 0.028317533496066485,\n \"acc_norm\": 0.3148148148148148,\n \"acc_norm_stderr\": 0.028317533496066485\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5252100840336135,\n \"acc_stderr\": 0.0324371805513741,\n \"acc_norm\": 0.5252100840336135,\n \"acc_norm_stderr\": 0.0324371805513741\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2582781456953642,\n \"acc_stderr\": 0.035737053147634576,\n \"acc_norm\": 0.2582781456953642,\n \"acc_norm_stderr\": 0.035737053147634576\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.6678899082568808,\n \"acc_stderr\": 0.02019268298542333,\n \"acc_norm\": 0.6678899082568808,\n \"acc_norm_stderr\": 0.02019268298542333\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.35185185185185186,\n \"acc_stderr\": 0.032568505702936484,\n \"acc_norm\": 0.35185185185185186,\n \"acc_norm_stderr\": 0.032568505702936484\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.6666666666666666,\n \"acc_stderr\": 0.03308611113236436,\n \"acc_norm\": 0.6666666666666666,\n \"acc_norm_stderr\": 0.03308611113236436\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.6919831223628692,\n \"acc_stderr\": 0.0300523893356057,\n \"acc_norm\": 0.6919831223628692,\n \"acc_norm_stderr\": 0.0300523893356057\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.600896860986547,\n \"acc_stderr\": 0.03286745312567961,\n \"acc_norm\": 0.600896860986547,\n \"acc_norm_stderr\": 0.03286745312567961\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.5801526717557252,\n \"acc_stderr\": 0.043285772152629715,\n \"acc_norm\": 0.5801526717557252,\n \"acc_norm_stderr\": 0.043285772152629715\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.6859504132231405,\n \"acc_stderr\": 0.04236964753041018,\n \"acc_norm\": 0.6859504132231405,\n \"acc_norm_stderr\": 0.04236964753041018\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.6018518518518519,\n \"acc_stderr\": 0.04732332615978813,\n \"acc_norm\": 0.6018518518518519,\n \"acc_norm_stderr\": 0.04732332615978813\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.656441717791411,\n \"acc_stderr\": 0.037311335196738925,\n \"acc_norm\": 0.656441717791411,\n \"acc_norm_stderr\": 0.037311335196738925\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4017857142857143,\n \"acc_stderr\": 0.04653333146973646,\n \"acc_norm\": 0.4017857142857143,\n \"acc_norm_stderr\": 0.04653333146973646\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.6601941747572816,\n \"acc_stderr\": 0.046897659372781335,\n \"acc_norm\": 0.6601941747572816,\n \"acc_norm_stderr\": 0.046897659372781335\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.7991452991452992,\n \"acc_stderr\": 0.02624677294689048,\n \"acc_norm\": 0.7991452991452992,\n \"acc_norm_stderr\": 0.02624677294689048\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.54,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.54,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 
0.7113665389527458,\n \"acc_stderr\": 0.016203792703197776,\n \"acc_norm\": 0.7113665389527458,\n \"acc_norm_stderr\": 0.016203792703197776\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.5520231213872833,\n \"acc_stderr\": 0.02677299065336182,\n \"acc_norm\": 0.5520231213872833,\n \"acc_norm_stderr\": 0.02677299065336182\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.24916201117318434,\n \"acc_stderr\": 0.014465893829859924,\n \"acc_norm\": 0.24916201117318434,\n \"acc_norm_stderr\": 0.014465893829859924\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.5359477124183006,\n \"acc_stderr\": 0.02855582751652878,\n \"acc_norm\": 0.5359477124183006,\n \"acc_norm_stderr\": 0.02855582751652878\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6366559485530546,\n \"acc_stderr\": 0.027316847674192703,\n \"acc_norm\": 0.6366559485530546,\n \"acc_norm_stderr\": 0.027316847674192703\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6265432098765432,\n \"acc_stderr\": 0.026915003011380157,\n \"acc_norm\": 0.6265432098765432,\n \"acc_norm_stderr\": 0.026915003011380157\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.3617021276595745,\n \"acc_stderr\": 0.028663820147199492,\n \"acc_norm\": 0.3617021276595745,\n \"acc_norm_stderr\": 0.028663820147199492\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4106910039113429,\n \"acc_stderr\": 0.012564871542534353,\n \"acc_norm\": 0.4106910039113429,\n \"acc_norm_stderr\": 0.012564871542534353\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4485294117647059,\n \"acc_stderr\": 0.0302114796091216,\n \"acc_norm\": 0.4485294117647059,\n \"acc_norm_stderr\": 0.0302114796091216\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.553921568627451,\n \"acc_stderr\": 0.020109864547181357,\n \"acc_norm\": 0.553921568627451,\n \"acc_norm_stderr\": 0.020109864547181357\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.5909090909090909,\n \"acc_stderr\": 0.04709306978661896,\n \"acc_norm\": 0.5909090909090909,\n \"acc_norm_stderr\": 0.04709306978661896\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.5346938775510204,\n \"acc_stderr\": 0.03193207024425314,\n \"acc_norm\": 0.5346938775510204,\n \"acc_norm_stderr\": 0.03193207024425314\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7064676616915423,\n \"acc_stderr\": 0.03220024104534204,\n \"acc_norm\": 0.7064676616915423,\n \"acc_norm_stderr\": 0.03220024104534204\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.463855421686747,\n \"acc_stderr\": 0.03882310850890593,\n \"acc_norm\": 0.463855421686747,\n \"acc_norm_stderr\": 0.03882310850890593\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.783625730994152,\n \"acc_stderr\": 0.03158149539338734,\n \"acc_norm\": 0.783625730994152,\n \"acc_norm_stderr\": 0.03158149539338734\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.26193390452876375,\n \"mc1_stderr\": 0.01539211880501503,\n \"mc2\": 0.37999611805412853,\n \"mc2_stderr\": 0.013428724763055466\n }\n}\n```", "repo_url": "https://huggingface.co/krevas/LDCC-Instruct-Llama-2-ko-13B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email 
protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|arc:challenge|25_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hellaswag|10_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T06-55-19.126017.parquet", 
"**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T06-55-19.126017.parquet", 
"**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T06-55-19.126017.parquet", 
"**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T06-55-19.126017.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": 
"latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T06-55-19.126017.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T06_55_19.126017", "path": ["results_2023-10-09T06-55-19.126017.parquet"]}, {"split": "latest", "path": ["results_2023-10-09T06-55-19.126017.parquet"]}]}]}
2023-10-09T05:56:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model krevas/LDCC-Instruct-Llama-2-ko-13B on the Open LLM Leaderboard. The dataset is composed of 61 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-09T06:55:19.126017 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
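The card above announces a loading snippet that was lost in extraction. Below is a minimal sketch of what it would look like, assuming the repository follows the leaderboard's `details_<org>__<model>` naming convention (the exact repo id is not shown in this record) and using the `harness_truthfulqa_mc_0` config and "latest" split declared in the metadata above:

```python
from datasets import load_dataset

# The repo id is an assumption, inferred from the open-llm-leaderboard
# naming convention details_<org>__<model>; the config name and the
# "latest" split come from this record's metadata.
data = load_dataset(
    "open-llm-leaderboard/details_krevas__LDCC-Instruct-Llama-2-ko-13B",
    "harness_truthfulqa_mc_0",
    split="latest",
)
```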
[ "# Dataset Card for Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model krevas/LDCC-Instruct-Llama-2-ko-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-09T06:55:19.126017(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model krevas/LDCC-Instruct-Llama-2-ko-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-09T06:55:19.126017(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 25, 31, 173, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of krevas/LDCC-Instruct-Llama-2-ko-13B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model krevas/LDCC-Instruct-Llama-2-ko-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 61 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-09T06:55:19.126017(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
83cca606e0cfd568db57f8da6080e4fbc6f32f3f
# Dataset Card for "validation_data_T5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/validation_data_T5
[ "region:us" ]
2023-10-09T05:58:27+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 338661368, "num_examples": 31984}], "download_size": 43689455, "dataset_size": 338661368}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T05:58:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "validation_data_T5" More Information needed
[ "# Dataset Card for \"validation_data_T5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"validation_data_T5\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"validation_data_T5\"\n\nMore Information needed" ]
2b241c50b4a0eb9bcf083d0fed7d123345c5a7c8
# Dataset Card for "LAION-DallE-3-Local" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShoukanLabs/LAION-DallE-3-Local
[ "region:us" ]
2023-10-09T05:59:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "url", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1531813332.75, "num_examples": 1250}], "download_size": 1176337783, "dataset_size": 1531813332.75}}
2023-10-09T06:14:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LAION-DallE-3-Local" More Information needed
[ "# Dataset Card for \"LAION-DallE-3-Local\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LAION-DallE-3-Local\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LAION-DallE-3-Local\"\n\nMore Information needed" ]
6ba72035345fe52514151bb64c790c6fcde4632c
# Dataset Card for "LLM4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Mihir1108/LLM4
[ "region:us" ]
2023-10-09T06:08:58+00:00
{"dataset_info": {"features": [{"name": "formate_data", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1812884, "num_examples": 483}], "download_size": 513538, "dataset_size": 1812884}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T06:09:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LLM4" More Information needed
[ "# Dataset Card for \"LLM4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LLM4\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LLM4\"\n\nMore Information needed" ]
c0b376bb8f5029b6c77af5c45f2bbc52b1ec2422
# Dataset Card for "eng_sur_val_DA_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/eng_sur_val_DA_tokenized
[ "region:us" ]
2023-10-09T06:13:06+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 30391635, "num_examples": 22390}], "download_size": 5882210, "dataset_size": 30391635}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T06:13:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eng_sur_val_DA_tokenized" More Information needed
[ "# Dataset Card for \"eng_sur_val_DA_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eng_sur_val_DA_tokenized\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eng_sur_val_DA_tokenized\"\n\nMore Information needed" ]
64181f7e90012a99e251031841a22039a20194d6
# Dataset Card for "embeddings_from_distilbert_masking_heaps_and_eval_part0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
johannes-garstenauer/embeddings_from_distilbert_masking_heaps_and_eval_part0
[ "region:us" ]
2023-10-09T06:13:33+00:00
{"dataset_info": {"features": [{"name": "struct", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "pred", "dtype": "int64"}, {"name": "cls_layer_6", "sequence": "float32"}, {"name": "cls_layer_5", "sequence": "float32"}, {"name": "cls_layer_4", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 1282993344, "num_examples": 134592}], "download_size": 1493342036, "dataset_size": 1282993344}}
2023-10-09T06:16:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "embeddings_from_distilbert_masking_heaps_and_eval_part0" More Information needed
[ "# Dataset Card for \"embeddings_from_distilbert_masking_heaps_and_eval_part0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"embeddings_from_distilbert_masking_heaps_and_eval_part0\"\n\nMore Information needed" ]
[ 6, 33 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"embeddings_from_distilbert_masking_heaps_and_eval_part0\"\n\nMore Information needed" ]
67ad72c59314f04337fe8863e47e0119b59d1887
# Dataset Card for "night_time_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/night_time_prompts
[ "region:us" ]
2023-10-09T06:18:41+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 706, "num_examples": 5}], "download_size": 1559, "dataset_size": 706}}
2023-10-09T06:18:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "night_time_prompts" More Information needed
[ "# Dataset Card for \"night_time_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"night_time_prompts\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"night_time_prompts\"\n\nMore Information needed" ]
cfe336de4fb81d9afcb3027ac68c228bf3f7bafa
# Languages: English, Tagalog ## Collection Process: - Dialogs generated by instructing ChatGPT to respond concisely - Responses edited by Nuph researchers for naturalness - Bilingual exchanges added for diversity ## Intended Use: - Train conversational agents - Research in straightforward dialog # Limitations: - Small scale (300 rows) - Biased toward English - Limited to text conversations # Ethics and Privacy: - No personal or offensive content - ChatGPT instructed to avoid unethical responses - Data anonymized - no personally identifiable information
baebee/chatgpt-custom_inst
[ "task_categories:summarization", "task_categories:question-answering", "task_categories:conversational", "size_categories:n<1K", "language:en", "language:tl", "license:mit", "region:us" ]
2023-10-09T06:31:02+00:00
{"language": ["en", "tl"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["summarization", "question-answering", "conversational"]}
2023-10-09T18:16:48+00:00
[]
[ "en", "tl" ]
TAGS #task_categories-summarization #task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-English #language-Tagalog #license-mit #region-us
# Languages: English, Tagalog ## Collection Process: - Dialogs generated by instructing ChatGPT to respond concisely - Responses edited by Nuph researchers for naturalness - Bilingual exchanges added for diversity ## Intended Use: - Train conversational agents - Research in straightforward dialog # Limitations: - Small scale (300 rows) - Biased toward English - Limited to text conversations # Ethics and Privacy: - No personal or offensive content - ChatGPT instructed to avoid unethical responses - Data anonymized - no personally identifiable information
[ "# Languages: English, Tagalog", "## Collection Process:\n\n- Dialogs generated by instructing ChatGPT to respond concisely\n- Responses edited by Nuph researchers for naturalness\n- Bilingual exchanges added for diversity", "## Intended Use:\n\n- Train conversational agents\n- Research in straightforward dialog", "# Limitations:\n\n- Small scale (300 rows)\n- Biased toward English\n- Limited to text conversations", "# Ethics and Privacy:\n\n- No personal or offensive content\n- ChatGPT instructed to avoid unethical responses\n- Data anonymized - no personally identifiable information" ]
[ "TAGS\n#task_categories-summarization #task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-English #language-Tagalog #license-mit #region-us \n", "# Languages: English, Tagalog", "## Collection Process:\n\n- Dialogs generated by instructing ChatGPT to respond concisely\n- Responses edited by Nuph researchers for naturalness\n- Bilingual exchanges added for diversity", "## Intended Use:\n\n- Train conversational agents\n- Research in straightforward dialog", "# Limitations:\n\n- Small scale (300 rows)\n- Biased toward English\n- Limited to text conversations", "# Ethics and Privacy:\n\n- No personal or offensive content\n- ChatGPT instructed to avoid unethical responses\n- Data anonymized - no personally identifiable information" ]
[ 63, 7, 44, 18, 25, 38 ]
[ "passage: TAGS\n#task_categories-summarization #task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-English #language-Tagalog #license-mit #region-us \n# Languages: English, Tagalog## Collection Process:\n\n- Dialogs generated by instructing ChatGPT to respond concisely\n- Responses edited by Nuph researchers for naturalness\n- Bilingual exchanges added for diversity## Intended Use:\n\n- Train conversational agents\n- Research in straightforward dialog# Limitations:\n\n- Small scale (300 rows)\n- Biased toward English\n- Limited to text conversations# Ethics and Privacy:\n\n- No personal or offensive content\n- ChatGPT instructed to avoid unethical responses\n- Data anonymized - no personally identifiable information" ]
956402b795061e7807f26b7b503f427e9f8106f8
# Dataset Card for "eng_sur_2_DA_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/eng_sur_2_DA_tokenized
[ "region:us" ]
2023-10-09T06:43:54+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 127645465, "num_examples": 160590}], "download_size": 26460153, "dataset_size": 127645465}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T06:43:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eng_sur_2_DA_tokenized" More Information needed
[ "# Dataset Card for \"eng_sur_2_DA_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eng_sur_2_DA_tokenized\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eng_sur_2_DA_tokenized\"\n\nMore Information needed" ]
2c50ffdc74755e657cf5ad68cc47dc36fd50f0f5
# Dataset Card for "input-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
midojiang/frist-dataset
[ "region:us" ]
2023-10-09T06:51:36+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "ADONIS", "1": "AFRICAN GIANT SWALLOWTAIL", "2": "AMERICAN SNOOT"}}}}], "splits": [{"name": "train", "num_bytes": 8825732.0, "num_examples": 338}], "download_size": 8823395, "dataset_size": 8825732.0}}
2023-10-10T02:14:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "input-dataset" More Information needed
[ "# Dataset Card for \"input-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"input-dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"input-dataset\"\n\nMore Information needed" ]
97094cd042f564b1e4029ef9dad3d6a03f23fad8
# Dataset Card for "Data_red" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zardat/Data_red
[ "region:us" ]
2023-10-09T06:52:44+00:00
{"dataset_info": {"features": [{"name": "x", "sequence": {"sequence": "float32"}}, {"name": "edge_index", "sequence": {"sequence": "float32"}}, {"name": "edge_attr", "sequence": "float32"}, {"name": "y", "dtype": "float32"}], "splits": [{"name": "train", "num_bytes": 151462112, "num_examples": 1018}], "download_size": 3005527, "dataset_size": 151462112}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T06:52:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Data_red" More Information needed
[ "# Dataset Card for \"Data_red\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Data_red\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Data_red\"\n\nMore Information needed" ]
2b4001ea2486d81f59b36efd79e95de7d2db3993
# Dataset Card for "donut_5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sankettgorey/donut_5
[ "region:us" ]
2023-10-09T06:57:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 437960649.0, "num_examples": 1000}], "download_size": 402681326, "dataset_size": 437960649.0}}
2023-10-09T07:00:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "donut_5" More Information needed
[ "# Dataset Card for \"donut_5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"donut_5\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"donut_5\"\n\nMore Information needed" ]
36ee596b2e23ad20203931785a356e8370ba56f3
# Dataset Card for "kids_coloring_book_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/kids_coloring_book_prompts
[ "region:us" ]
2023-10-09T07:04:42+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 287035, "num_examples": 3000}], "download_size": 4140, "dataset_size": 287035}}
2023-10-09T07:09:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "kids_coloring_book_prompts" More Information needed
[ "# Dataset Card for \"kids_coloring_book_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"kids_coloring_book_prompts\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"kids_coloring_book_prompts\"\n\nMore Information needed" ]
29a30a382e78529040de237d982a621dae9dd157
# Dataset Card for "JazzHiphop" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
RorooroR/JazzHiphop
[ "region:us" ]
2023-10-09T07:06:37+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "audio_file", "dtype": "string"}, {"name": "slice", "dtype": "int16"}], "splits": [{"name": "train", "num_bytes": 191805587.75, "num_examples": 4378}], "download_size": 191445041, "dataset_size": 191805587.75}}
2023-10-09T08:03:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "JazzHiphop" More Information needed
[ "# Dataset Card for \"JazzHiphop\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"JazzHiphop\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"JazzHiphop\"\n\nMore Information needed" ]
c219dfc50d989a903052f19190a12b4d9866aacb
# M3Exam: A Multilingual 🌏, Multimodal 🖼, Multilevel 📈 Benchmark for LLMs This is the repository for [M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models](https://arxiv.org/abs/2306.05179/). TL;DR: We introduce M3Exam, a novel benchmark sourced from real and official human exam questions for evaluating LLMs in a multilingual, multimodal, and multilevel context. ![image](https://github.com/DAMO-NLP-SG/M3Exam/blob/main/images/m3exam-examples.jpg?raw=true)
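For reference, here is a minimal loading sketch built only from the config names, splits, and feature names declared in this record's metadata (each language is its own config with "dev" and "test" splits):

```python
from datasets import load_dataset

# Configs per the metadata: afrikaans, chinese, english, italian, javanese,
# portuguese, swahili, thai, vietnamese; each exposes "dev" and "test" splits.
thai_test = load_dataset("chiayewken/m3exam", "thai", split="test")

# Feature names (question_text, options, answer_text, ...) come from the
# dataset_info block below.
example = thai_test[0]
print(example["question_text"])
print(example["options"], "->", example["answer_text"])
```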
chiayewken/m3exam
[ "arxiv:2306.05179", "region:us" ]
2023-10-09T07:32:21+00:00
{"dataset_info": [{"config_name": "afrikaans", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 8860, "num_examples": 25}, {"name": "test", "num_bytes": 194333, "num_examples": 258}], "download_size": 71295, "dataset_size": 203193}, {"config_name": "chinese", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 25055, "num_examples": 29}, {"name": "test", "num_bytes": 485093, "num_examples": 682}], "download_size": 289255, "dataset_size": 510148}, {"config_name": "english", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 12792, "num_examples": 32}, {"name": "test", "num_bytes": 2573796, "num_examples": 1911}], "download_size": 697219, "dataset_size": 2586588}, {"config_name": "italian", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 5834, "num_examples": 18}, {"name": "test", "num_bytes": 2397963, "num_examples": 811}], "download_size": 326671, "dataset_size": 2403797}, {"config_name": "javanese", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 1425, "num_examples": 6}, {"name": "test", "num_bytes": 187280, "num_examples": 371}], "download_size": 84085, "dataset_size": 188705}, {"config_name": "portuguese", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 20979, "num_examples": 24}, {"name": "test", "num_bytes": 941655, "num_examples": 889}], "download_size": 614816, "dataset_size": 962634}, {"config_name": "swahili", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": 
"answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 2053, "num_examples": 6}, {"name": "test", "num_bytes": 607215, "num_examples": 428}], "download_size": 94031, "dataset_size": 609268}, {"config_name": "thai", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 16185, "num_examples": 26}, {"name": "test", "num_bytes": 2249737, "num_examples": 2168}], "download_size": 901256, "dataset_size": 2265922}, {"config_name": "vietnamese", "features": [{"name": "question_text", "dtype": "string"}, {"name": "background", "dtype": "string"}, {"name": "answer_text", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "language", "dtype": "string"}, {"name": "level", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_category", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 7974, "num_examples": 28}, {"name": "test", "num_bytes": 767759, "num_examples": 1789}], "download_size": 375774, "dataset_size": 775733}], "configs": [{"config_name": "afrikaans", "data_files": [{"split": "dev", "path": "afrikaans/dev-*"}, {"split": "test", "path": "afrikaans/test-*"}]}, {"config_name": "chinese", "data_files": [{"split": "dev", "path": "chinese/dev-*"}, {"split": "test", "path": "chinese/test-*"}]}, {"config_name": "english", "data_files": [{"split": "dev", "path": "english/dev-*"}, {"split": "test", "path": "english/test-*"}]}, {"config_name": "italian", "data_files": [{"split": "dev", "path": "italian/dev-*"}, {"split": "test", "path": "italian/test-*"}]}, {"config_name": "javanese", "data_files": [{"split": "dev", "path": "javanese/dev-*"}, {"split": "test", "path": "javanese/test-*"}]}, {"config_name": "portuguese", "data_files": [{"split": "dev", "path": "portuguese/dev-*"}, {"split": "test", "path": "portuguese/test-*"}]}, {"config_name": "swahili", "data_files": [{"split": "dev", "path": "swahili/dev-*"}, {"split": "test", "path": "swahili/test-*"}]}, {"config_name": "thai", "data_files": [{"split": "dev", "path": "thai/dev-*"}, {"split": "test", "path": "thai/test-*"}]}, {"config_name": "vietnamese", "data_files": [{"split": "dev", "path": "vietnamese/dev-*"}, {"split": "test", "path": "vietnamese/test-*"}]}]}
2023-10-09T07:38:11+00:00
[ "2306.05179" ]
[]
TAGS #arxiv-2306.05179 #region-us
# M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for LLMs This is the repository for M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models. TL;DR: We introduce M3Exam, a novel benchmark sourced from real and official human exam questions for evaluating LLMs in a multilingual, multimodal, and multilevel context. !image
[ "# M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for LLMs\n\nThis is the repository for M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models.\n\nTL;DR: We introduce M3Exam, a novel benchmark sourced from real and official human exam questions for evaluating LLMs in a multilingual, multimodal, and multilevel context.\n\n!image" ]
[ "TAGS\n#arxiv-2306.05179 #region-us \n", "# M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for LLMs\n\nThis is the repository for M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models.\n\nTL;DR: We introduce M3Exam, a novel benchmark sourced from real and official human exam questions for evaluating LLMs in a multilingual, multimodal, and multilevel context.\n\n!image" ]
[ 14, 106 ]
[ "passage: TAGS\n#arxiv-2306.05179 #region-us \n# M3Exam: A Multilingual , Multimodal , Multilevel Benchmark for LLMs\n\nThis is the repository for M3Exam: A Multilingual, Multimodal, Multilevel Benchmark for Examining Large Language Models.\n\nTL;DR: We introduce M3Exam, a novel benchmark sourced from real and official human exam questions for evaluating LLMs in a multilingual, multimodal, and multilevel context.\n\n!image" ]
720bc751b2fd3064073ca442938054bb0d530a25
# Dataset Card for Evaluation run of openbmb/UltraLM-13b-v2.0

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/openbmb/UltraLM-13b-v2.0
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [openbmb/UltraLM-13b-v2.0](https://huggingface.co/openbmb/UltraLM-13b-v2.0) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0",
	"harness_gsm8k_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-12-02T13:14:33.191759](https://huggingface.co/datasets/open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0/blob/main/results_2023-12-02T13-14-33.191759.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "acc": 0.2608036391205459,
        "acc_stderr": 0.012094252417332734
    },
    "harness|gsm8k|5": {
        "acc": 0.2608036391205459,
        "acc_stderr": 0.012094252417332734
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0
[ "region:us" ]
2023-10-09T07:34:36+00:00
{"pretty_name": "Evaluation run of openbmb/UltraLM-13b-v2.0", "dataset_summary": "Dataset automatically created during the evaluation run of model [openbmb/UltraLM-13b-v2.0](https://huggingface.co/openbmb/UltraLM-13b-v2.0) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0\",\n\t\"harness_gsm8k_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-02T13:14:33.191759](https://huggingface.co/datasets/open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0/blob/main/results_2023-12-02T13-14-33.191759.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2608036391205459,\n \"acc_stderr\": 0.012094252417332734\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.2608036391205459,\n \"acc_stderr\": 0.012094252417332734\n }\n}\n```", "repo_url": "https://huggingface.co/openbmb/UltraLM-13b-v2.0", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|arc:challenge|25_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_25T05_11_16.252341", "path": ["**/details_harness|drop|3_2023-10-25T05-11-16.252341.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-25T05-11-16.252341.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_25T05_11_16.252341", "path": ["**/details_harness|gsm8k|5_2023-10-25T05-11-16.252341.parquet"]}, {"split": "2023_12_02T13_14_33.191759", "path": ["**/details_harness|gsm8k|5_2023-12-02T13-14-33.191759.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-02T13-14-33.191759.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hellaswag|10_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T08-34-12.309014.parquet", 
"**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T08-34-12.309014.parquet", 
"**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T08-34-12.309014.parquet", 
"**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T08-34-12.309014.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": 
["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": 
["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T08-34-12.309014.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": 
["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T08-34-12.309014.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T08-34-12.309014.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_25T05_11_16.252341", "path": ["**/details_harness|winogrande|5_2023-10-25T05-11-16.252341.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-25T05-11-16.252341.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T08_34_12.309014", "path": ["results_2023-10-09T08-34-12.309014.parquet"]}, {"split": "2023_10_25T05_11_16.252341", "path": ["results_2023-10-25T05-11-16.252341.parquet"]}, {"split": "2023_12_02T13_14_33.191759", "path": ["results_2023-12-02T13-14-33.191759.parquet"]}, {"split": "latest", "path": ["results_2023-12-02T13-14-33.191759.parquet"]}]}]}
2023-12-02T13:14:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of openbmb/UltraLM-13b-v2.0 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model openbmb/UltraLM-13b-v2.0 on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-02T13:14:33.191759 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
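The loading snippet that "the following" points to was dropped when this card text was flattened; restored from the metadata block earlier in this record, it is:

```python
from datasets import load_dataset

# Per-example details for the 5-shot gsm8k run; config and split names
# come from this record's metadata.
data = load_dataset(
    "open-llm-leaderboard/details_openbmb__UltraLM-13b-v2.0",
    "harness_gsm8k_5",
    split="train",
)
```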
[ "# Dataset Card for Evaluation run of openbmb/UltraLM-13b-v2.0", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model openbmb/UltraLM-13b-v2.0 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-02T13:14:33.191759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of openbmb/UltraLM-13b-v2.0", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model openbmb/UltraLM-13b-v2.0 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-02T13:14:33.191759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 21, 31, 170, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of openbmb/UltraLM-13b-v2.0## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model openbmb/UltraLM-13b-v2.0 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-02T13:14:33.191759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
9a404b3c3808c3b5a653f2ed968cead0538d9905
# Dataset Card for "coloring_book_animals" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/coloring_book_animals
[ "region:us" ]
2023-10-09T08:09:02+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 284135, "num_examples": 1000}], "download_size": 3100, "dataset_size": 284135}}
2023-10-09T08:09:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "coloring_book_animals" More Information needed
[ "# Dataset Card for \"coloring_book_animals\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"coloring_book_animals\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"coloring_book_animals\"\n\nMore Information needed" ]
3f4af9f505d06b1d8dd5265070d1cd262afc73ef
# Dataset Card for Evaluation run of Yukang/LongAlpaca-13B ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Yukang/LongAlpaca-13B - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [Yukang/LongAlpaca-13B](https://huggingface.co/Yukang/LongAlpaca-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Yukang__LongAlpaca-13B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-27T22:00:30.556276](https://huggingface.co/datasets/open-llm-leaderboard/details_Yukang__LongAlpaca-13B/blob/main/results_2023-10-27T22-00-30.556276.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.17051174496644295, "em_stderr": 0.003851429222727117, "f1": 0.23656669463087293, "f1_stderr": 0.003934121554985558, "acc": 0.32044198895027626, "acc_stderr": 0.006741557601060113 }, "harness|drop|3": { "em": 0.17051174496644295, "em_stderr": 0.003851429222727117, "f1": 0.23656669463087293, "f1_stderr": 0.003934121554985558 }, "harness|gsm8k|5": { "acc": 0.0, "acc_stderr": 0.0 }, "harness|winogrande|5": { "acc": 0.6408839779005525, "acc_stderr": 0.013483115202120225 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
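The snippet in the card loads the winogrande details; per the metadata below, the same pattern applies to the other configs this run produced. For example, a sketch pulling the latest DROP details (config name "harness_drop_3" and the "latest" split alias are taken from the metadata; the assumption is that each row carries the per-example details behind the aggregated DROP em/f1 quoted above):

```python
from datasets import load_dataset

# Load the most recent DROP detail shards for this model.
drop_details = load_dataset(
    "open-llm-leaderboard/details_Yukang__LongAlpaca-13B",
    "harness_drop_3",
    split="latest",
)
```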
open-llm-leaderboard/details_Yukang__LongAlpaca-13B
[ "region:us" ]
2023-10-09T08:20:16+00:00
{"pretty_name": "Evaluation run of Yukang/LongAlpaca-13B", "dataset_summary": "Dataset automatically created during the evaluation run of model [Yukang/LongAlpaca-13B](https://huggingface.co/Yukang/LongAlpaca-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Yukang__LongAlpaca-13B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-27T22:00:30.556276](https://huggingface.co/datasets/open-llm-leaderboard/details_Yukang__LongAlpaca-13B/blob/main/results_2023-10-27T22-00-30.556276.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.17051174496644295,\n \"em_stderr\": 0.003851429222727117,\n \"f1\": 0.23656669463087293,\n \"f1_stderr\": 0.003934121554985558,\n \"acc\": 0.32044198895027626,\n \"acc_stderr\": 0.006741557601060113\n },\n \"harness|drop|3\": {\n \"em\": 0.17051174496644295,\n \"em_stderr\": 0.003851429222727117,\n \"f1\": 0.23656669463087293,\n \"f1_stderr\": 0.003934121554985558\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6408839779005525,\n \"acc_stderr\": 0.013483115202120225\n }\n}\n```", "repo_url": "https://huggingface.co/Yukang/LongAlpaca-13B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|arc:challenge|25_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_27T22_00_30.556276", "path": ["**/details_harness|drop|3_2023-10-27T22-00-30.556276.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-27T22-00-30.556276.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_27T22_00_30.556276", "path": ["**/details_harness|gsm8k|5_2023-10-27T22-00-30.556276.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-27T22-00-30.556276.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hellaswag|10_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-19-51.890196.parquet", 
"**/details_harness|hendrycksTest-management|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-19-51.890196.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T09-19-51.890196.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-19-51.890196.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": 
"2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T09-19-51.890196.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T09-19-51.890196.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_27T22_00_30.556276", "path": ["**/details_harness|winogrande|5_2023-10-27T22-00-30.556276.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-27T22-00-30.556276.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T09_19_51.890196", "path": ["results_2023-10-09T09-19-51.890196.parquet"]}, {"split": "2023_10_27T22_00_30.556276", "path": ["results_2023-10-27T22-00-30.556276.parquet"]}, {"split": "latest", "path": ["results_2023-10-27T22-00-30.556276.parquet"]}]}]}
2023-10-27T21:00:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Yukang/LongAlpaca-13B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Yukang/LongAlpaca-13B on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-27T22:00:30.556276 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
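The flattened card above references a loading snippet that the text processing dropped. A minimal sketch, assuming the repo follows the leaderboard's usual `details_<org>__<model>` naming (the repo id below is inferred, not quoted from this record), with the `harness_winogrande_5` config name taken from the metadata above:

```python
from datasets import load_dataset

# Repo id is an assumption based on the leaderboard's naming pattern;
# the config name comes from this record's metadata.
data = load_dataset(
    "open-llm-leaderboard/details_Yukang__LongAlpaca-13B",
    "harness_winogrande_5",
    split="train",
)
print(data)
```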
[ "# Dataset Card for Evaluation run of Yukang/LongAlpaca-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yukang/LongAlpaca-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-27T22:00:30.556276(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Yukang/LongAlpaca-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yukang/LongAlpaca-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-27T22:00:30.556276(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 167, 68, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Yukang/LongAlpaca-13B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Yukang/LongAlpaca-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-27T22:00:30.556276(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
8f630951e84851174cd44cf02115efbafaf20620
# Dataset Card for "Drone_Doppler_Noise" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Goorm-AI-04/Drone_Doppler_Noise
[ "region:us" ]
2023-10-09T08:27:42+00:00
{"dataset_info": {"features": [{"name": "image", "sequence": {"sequence": {"sequence": "float64"}}}, {"name": "label", "dtype": "int64"}, {"name": "type", "dtype": "string"}, {"name": "noise_var_0.0001", "sequence": {"sequence": {"sequence": "float64"}}}, {"name": "noise_var_0.0005", "sequence": {"sequence": {"sequence": "float64"}}}, {"name": "noise_var_0.001", "sequence": {"sequence": {"sequence": "float64"}}}, {"name": "noise_var_0.005", "sequence": {"sequence": {"sequence": "float64"}}}, {"name": "noise_var_0.01", "sequence": {"sequence": {"sequence": "float64"}}}], "splits": [{"name": "train", "num_bytes": 395275453, "num_examples": 3497}], "download_size": 314133140, "dataset_size": 395275453}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T08:27:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Drone_Doppler_Noise" More Information needed
[ "# Dataset Card for \"Drone_Doppler_Noise\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Drone_Doppler_Noise\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Drone_Doppler_Noise\"\n\nMore Information needed" ]
83b0e4ce56bd5161482e8d0c836160e3a04bed96
# Dataset Card for Evaluation run of IkariDev/Athena-v4

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/IkariDev/Athena-v4
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [IkariDev/Athena-v4](https://huggingface.co/IkariDev/Athena-v4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_IkariDev__Athena-v4",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-10-25T17:00:26.530924](https://huggingface.co/datasets/open-llm-leaderboard/details_IkariDev__Athena-v4/blob/main/results_2023-10-25T17-00-26.530924.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.05432046979865772,
        "em_stderr": 0.002321097609357669,
        "f1": 0.13087562919463042,
        "f1_stderr": 0.0026936499511124616,
        "acc": 0.44229322757129275,
        "acc_stderr": 0.010432110783601959
    },
    "harness|drop|3": {
        "em": 0.05432046979865772,
        "em_stderr": 0.002321097609357669,
        "f1": 0.13087562919463042,
        "f1_stderr": 0.0026936499511124616
    },
    "harness|gsm8k|5": {
        "acc": 0.1197877179681577,
        "acc_stderr": 0.008944213403553046
    },
    "harness|winogrande|5": {
        "acc": 0.7647987371744278,
        "acc_stderr": 0.011920008163650872
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
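The snippet in the card covers one per-task config; to pull just the aggregated numbers shown under "Latest results", the same API can target the `results` config, whose split names appear in the metadata below. A small sketch (the exact column layout of the returned row is an assumption):

```python
from datasets import load_dataset

# The "results" config and its "latest" split are listed in this record's metadata.
results = load_dataset(
    "open-llm-leaderboard/details_IkariDev__Athena-v4",
    "results",
    split="latest",
)
print(results[0])  # one row of aggregated metrics; field names may differ by run
```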
open-llm-leaderboard/details_IkariDev__Athena-v4
[ "region:us" ]
2023-10-09T08:30:04+00:00
{"pretty_name": "Evaluation run of IkariDev/Athena-v4", "dataset_summary": "Dataset automatically created during the evaluation run of model [IkariDev/Athena-v4](https://huggingface.co/IkariDev/Athena-v4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_IkariDev__Athena-v4\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-25T17:00:26.530924](https://huggingface.co/datasets/open-llm-leaderboard/details_IkariDev__Athena-v4/blob/main/results_2023-10-25T17-00-26.530924.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.05432046979865772,\n \"em_stderr\": 0.002321097609357669,\n \"f1\": 0.13087562919463042,\n \"f1_stderr\": 0.0026936499511124616,\n \"acc\": 0.44229322757129275,\n \"acc_stderr\": 0.010432110783601959\n },\n \"harness|drop|3\": {\n \"em\": 0.05432046979865772,\n \"em_stderr\": 0.002321097609357669,\n \"f1\": 0.13087562919463042,\n \"f1_stderr\": 0.0026936499511124616\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.1197877179681577,\n \"acc_stderr\": 0.008944213403553046\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7647987371744278,\n \"acc_stderr\": 0.011920008163650872\n }\n}\n```", "repo_url": "https://huggingface.co/IkariDev/Athena-v4", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|arc:challenge|25_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_25T17_00_26.530924", "path": ["**/details_harness|drop|3_2023-10-25T17-00-26.530924.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-25T17-00-26.530924.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_25T17_00_26.530924", "path": ["**/details_harness|gsm8k|5_2023-10-25T17-00-26.530924.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-25T17-00-26.530924.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hellaswag|10_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-29-40.768179.parquet", 
"**/details_harness|hendrycksTest-management|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-29-40.768179.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T09-29-40.768179.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T09-29-40.768179.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": 
"2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T09-29-40.768179.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T09-29-40.768179.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_25T17_00_26.530924", "path": ["**/details_harness|winogrande|5_2023-10-25T17-00-26.530924.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-25T17-00-26.530924.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T09_29_40.768179", "path": ["results_2023-10-09T09-29-40.768179.parquet"]}, {"split": "2023_10_25T17_00_26.530924", "path": ["results_2023-10-25T17-00-26.530924.parquet"]}, {"split": "latest", "path": ["results_2023-10-25T17-00-26.530924.parquet"]}]}]}
2023-10-25T16:00:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of IkariDev/Athena-v4 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model IkariDev/Athena-v4 on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-25T17:00:26.530924 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
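A minimal sketch of that loading step, assuming the Open LLM Leaderboard's usual `details_<org>__<model>` repository naming and one of the config names listed in the metadata above (both the repo id and the config name are assumptions, not confirmed by this card):

```python
from datasets import load_dataset

# Hypothetical repo id and config name; substitute the actual values for this run.
details = load_dataset(
    "open-llm-leaderboard/details_IkariDev__Athena-v4",
    "harness_winogrande_5",
    split="latest",
)
print(details[0])
```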
[ "# Dataset Card for Evaluation run of IkariDev/Athena-v4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model IkariDev/Athena-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-25T17:00:26.530924(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of IkariDev/Athena-v4", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model IkariDev/Athena-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-25T17:00:26.530924(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 167, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of IkariDev/Athena-v4## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model IkariDev/Athena-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-25T17:00:26.530924(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
4d74eb58da02670b1e5cd9d784d16fbec2331dd1
# Dataset Card for "donut_6" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sankettgorey/donut_6
[ "region:us" ]
2023-10-09T08:36:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 350410662.6, "num_examples": 800}, {"name": "test", "num_bytes": 43730265.7, "num_examples": 100}, {"name": "valid", "num_bytes": 43819720.7, "num_examples": 100}], "download_size": 402661296, "dataset_size": 437960649.0}}
2023-10-09T08:38:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "donut_6" More Information needed
[ "# Dataset Card for \"donut_6\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"donut_6\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"donut_6\"\n\nMore Information needed" ]
07421d1948951fd4ff84e85bb497fa77e9acf686
This dataset is based on our publication *POSQA: Probe the World Models of LLMs with Size Comparisons* ([PDF](https://arxiv.org/abs/2310.13394)). More details: [GitHub Repo](https://github.com/cambridgeltl/POSQA)
cambridgeltl/posqa
[ "task_categories:text-classification", "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "arxiv:2310.13394", "region:us" ]
2023-10-09T08:40:22+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification", "question-answering"]}
2023-10-23T08:14:31+00:00
[ "2310.13394" ]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2310.13394 #region-us
This dataset is based on our publication *POSQA: Probe the World Models of LLMs with Size Comparisons* (PDF). More details: GitHub Repo
[]
[ "TAGS\n#task_categories-text-classification #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2310.13394 #region-us \n" ]
[ 62 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2310.13394 #region-us \n" ]
f5920b9d3fed0d1133756f080326667feb31232a
# Dataset Card for the Biomedical Domain ### Dataset Summary This dataset was obtained from GitHub (https://github.com/mmahbub/cpgQA/blob/main/dataset/cpgQA-v1.0.csv?plain=1) and uploaded to Hugging Face for easier access during fine-tuning. ### Languages English (en) ## Dataset Structure The dataset is in CSV format, with each row representing a single question-answer instance. The following columns are included: * **Title:** Categorises the QA. * **Context:** Gives a context of the QA. * **Question:** The question asked. * **Answer:** The expected and appropriate answer to the question asked.
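As a brief, hedged sketch of reading the file (the file name comes from the GitHub link above; the column names follow the card's list, though their exact casing in the CSV should be verified):

```python
import pandas as pd

# Read the cpgQA CSV and inspect the four documented columns.
# Column names are taken from the card above; verify casing against the file itself.
df = pd.read_csv("cpgQA-v1.0.csv")
print(df[["Title", "Context", "Question", "Answer"]].head())
```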
chloecchng/biomedical_cpgQA
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "biology", "medical", "region:us" ]
2023-10-09T08:58:21+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering"], "tags": ["biology", "medical"]}
2023-10-24T16:37:28+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #biology #medical #region-us
# Dataset Card for the Biomedical Domain ### Dataset Summary This dataset was obtained from GitHub (URL) and uploaded to Hugging Face for easier access during fine-tuning. ### Languages English (en) ## Dataset Structure The dataset is in CSV format, with each row representing a single question-answer instance. The following columns are included: * Title: Categorises the QA. * Context: Gives a context of the QA. * Question: The question asked. * Answer: The expected and appropriate answer to the question asked.
[ "# Dataset Card for the Biomedical Domain", "### Dataset Summary\n\nThis dataset was obtain through github (URL to Huggin Face for easier access while fine tuning.", "### Languages\n\nEnglish (en)", "## Dataset Structure\n\nThe dataset is in a CSV format, with each row representing a single review. The following columns are included:\n\n* Title: Categorises the QA.\n* Context: Gives a context of the QA.\n* Question: The question asked.\n* Answer: The expected and appropriate answer to the question asked." ]
[ "TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #biology #medical #region-us \n", "# Dataset Card for the Biomedical Domain", "### Dataset Summary\n\nThis dataset was obtain through github (URL to Huggin Face for easier access while fine tuning.", "### Languages\n\nEnglish (en)", "## Dataset Structure\n\nThe dataset is in a CSV format, with each row representing a single review. The following columns are included:\n\n* Title: Categorises the QA.\n* Context: Gives a context of the QA.\n* Question: The question asked.\n* Answer: The expected and appropriate answer to the question asked." ]
[ 48, 10, 28, 8, 77 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-1K<n<10K #language-English #license-apache-2.0 #biology #medical #region-us \n# Dataset Card for the Biomedical Domain### Dataset Summary\n\nThis dataset was obtain through github (URL to Huggin Face for easier access while fine tuning.### Languages\n\nEnglish (en)## Dataset Structure\n\nThe dataset is in a CSV format, with each row representing a single review. The following columns are included:\n\n* Title: Categorises the QA.\n* Context: Gives a context of the QA.\n* Question: The question asked.\n* Answer: The expected and appropriate answer to the question asked." ]
cf8794043b30f48568992d2f9becb1decb9c4e1e
# Dataset Card for "laion-2b-mistral_captions-1.3M" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bjoernp/laion-2b-mistral_captions-1.3M
[ "region:us" ]
2023-10-09T08:58:51+00:00
{"dataset_info": {"features": [{"name": "TEXT", "dtype": "string"}, {"name": "RESPONSE", "dtype": "string"}, {"name": "captions", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 853385896.3491833, "num_examples": 1318108}], "download_size": 540262191, "dataset_size": 853385896.3491833}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T09:00:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "laion-2b-mistral_captions-1.3M" More Information needed
[ "# Dataset Card for \"laion-2b-mistral_captions-1.3M\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"laion-2b-mistral_captions-1.3M\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"laion-2b-mistral_captions-1.3M\"\n\nMore Information needed" ]
e6cb76453ead0a371c72cd3662297056c705e811
# Dataset Card for "ASL_Isolated_Swin_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nandyc/ASL_Isolated_Swin_dataset
[ "region:us" ]
2023-10-09T09:30:50+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D", "4": "E", "5": "F", "6": "G", "7": "H", "8": "I", "9": "J", "10": "K", "11": "L", "12": "M", "13": "N", "14": "O", "15": "P", "16": "Q", "17": "R", "18": "S", "19": "T", "20": "U", "21": "V", "22": "W", "23": "X", "24": "Y", "25": "Z"}}}}], "splits": [{"name": "train", "num_bytes": 19265862.93533333, "num_examples": 1468}, {"name": "test", "num_bytes": 3392183.4166666665, "num_examples": 260}], "download_size": 22665194, "dataset_size": 22658046.351999998}}
2023-10-09T09:30:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ASL_Isolated_Swin_dataset" More Information needed
[ "# Dataset Card for \"ASL_Isolated_Swin_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ASL_Isolated_Swin_dataset\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ASL_Isolated_Swin_dataset\"\n\nMore Information needed" ]
36c2b7911bb39eb8a906eccda6bbdbcff3b8e0ca
# Dataset Card for "common_voice_13_0-ja-whisper-base" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CWKSC/common_voice_13_0-ja-whisper-base
[ "region:us" ]
2023-10-09T09:39:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 11557295928, "num_examples": 12032}, {"name": "test", "num_bytes": 4765120552, "num_examples": 4961}], "download_size": 2827086166, "dataset_size": 16322416480}}
2023-10-09T09:44:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "common_voice_13_0-ja-whisper-base" More Information needed
[ "# Dataset Card for \"common_voice_13_0-ja-whisper-base\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"common_voice_13_0-ja-whisper-base\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"common_voice_13_0-ja-whisper-base\"\n\nMore Information needed" ]
0cb6d2b25d2dbf62baea6c91225c66af0997af5c
# Dataset Description

The dataset comprises English biographies labeled with occupations and binary genders. This is an occupation classification task, where bias concerning gender can be studied. It includes a subset of 10,000 biographies (8k train/1k dev/1k test) targeting 5 medical occupations (psychologist, surgeon, nurse, dentist, physician), derived from De-Arteaga et al. (2019). We collect and release human rationale annotations for a subset of 100 biographies in two different settings: non-contrastive and contrastive. In the former, the annotators were asked to find the rationale for the question: "Why is the person in the following short bio described as a L?", where L is the gold label occupation, e.g., nurse. In the latter, the question was "Why is the person in the following short bio described as an L rather than an F", where F (foil) is another medical occupation, e.g., physician.

You can read more details on the dataset and the annotation process in the paper [Eberle et al. (2023)](https://arxiv.org/abs/2310.11906).

# Dataset Structure

We provide the `standard` version of the dataset, where examples look as follows.

```json
{
  "text": "He has been a practicing Dentist for 20 years. He has done BDS. He is currently associated with Sree Sai Dental Clinic in Sowkhya Ayurveda Speciality Clinic, Chennai. ... ",
  "label": 3
}
```

and the newly curated subset of examples including human rationales, dubbed `rationales`, where examples look as follows.

```json
{
  "text": "She is currently practising at Dr Ravindra Ratolikar Dental Clinic in Narayanguda, Hyderabad.",
  "label": 3,
  "foil": 2,
  "words": ["She", "is", "currently", "practising", "at", "Dr", "Ravindra", "Ratolikar", "Dental", "Clinic", "in", "Narayanguda", ",", "Hyderabad", "."],
  "rationales": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
  "contrastive_rationales": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
  "annotations": [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ...],
  "contrastive_annotations": [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ...]
}
```

# Use

To load the `standard` version of the dataset:

```python
from datasets import load_dataset

dataset = load_dataset("coastalcph/medical-bios", "standard")
```

To load the newly curated subset of examples with human rationales:

```python
from datasets import load_dataset

dataset = load_dataset("coastalcph/medical-bios", "rationales")
```

# Citation

[*Oliver Eberle\*, Ilias Chalkidis\*, Laura Cabello, Stephanie Brandl. Rather a Nurse than a Physician - Contrastive Explanations under Investigation. 2023. In the Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Singapore.*](https://aclanthology.org/2023.emnlp-main.427)

```
@inproceedings{eberle-etal-2023-rather,
    title = "Rather a Nurse than a Physician - Contrastive Explanations under Investigation",
    author = "Eberle, Oliver and Chalkidis, Ilias and Cabello, Laura and Brandl, Stephanie",
    booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.emnlp-main.427",
}
```
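A small, hedged sketch of working with the rationale masks (it assumes the binary mask aligns one-to-one with `words` and that a `test` split exists; check the loaded dataset for the actual split names):

```python
from datasets import load_dataset

# Recover the words an annotator marked as rationale for one example.
# Assumes "rationales" is a binary mask aligned one-to-one with "words",
# and that this configuration exposes a "test" split.
dataset = load_dataset("coastalcph/medical-bios", "rationales")
example = dataset["test"][0]
highlighted = [w for w, m in zip(example["words"], example["rationales"]) if m == 1]
print(highlighted)
```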
coastalcph/medical-bios
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "license:cc-by-nc-sa-4.0", "medical", "arxiv:2310.11906", "region:us" ]
2023-10-09T09:54:50+00:00
{"language": ["en"], "license": "cc-by-nc-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "medical-bios", "tags": ["medical"]}
2024-01-23T09:02:44+00:00
[ "2310.11906" ]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-English #license-cc-by-nc-sa-4.0 #medical #arxiv-2310.11906 #region-us
# Dataset Description The dataset comprises English biographies labeled with occupations and binary genders. This is an occupation classification task, where bias concerning gender can be studied. It includes a subset of 10,000 biographies (8k train/1k dev/1k test) targeting 5 medical occupations (psychologist, surgeon, nurse, dentist, physician), derived from De-Arteaga et al. (2019). We collect and release human rationale annotations for a subset of 100 biographies in two different settings: non-contrastive and contrastive. In the former, the annotators were asked to find the rationale for the question: "Why is the person in the following short bio described as a L?", where L is the gold label occupation, e.g., nurse. In the latter, the question was "Why is the person in the following short bio described as an L rather than an F", where F (foil) is another medical occupation, e.g., physician. You can read more details on the dataset and the annotation process in the paper Eberle et al. (2023). # Dataset Structure We provide the 'standard' version of the dataset, where examples look as follows. and the newly curated subset of examples including human rationales, dubbed 'rationales', where examples look as follows. # Use To load the 'standard' version of the dataset: To load the newly curated subset of examples with human rationales: *Oliver Eberle\*, Ilias Chalkidis\*, Laura Cabello, Stephanie Brandl. Rather a Nurse than a Physician - Contrastive Explanations under Investigation. 2023. In the Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Singapore.*
[ "# Dataset Description\n\nThe dataset comprises English biographies labeled with occupations and binary genders. \nThis is an occupation classification task, where bias concerning gender can be studied. \nIt includes a subset of 10,000 biographies (8k train/1k dev/1k test) targeting 5 medical occupations (psychologist, surgeon, nurse, dentist, physician), derived from De-Arteaga et al. (2019). \nWe collect and release human rationale annotations for a subset of 100 biographies in two different settings: non-contrastive and contrastive. \nIn the former, the annotators were asked to find the rationale for the question: \"Why is the person in the following short bio described as a L?\", where L is the gold label occupation, e.g., nurse. \nIn the latter, the question was \"Why is the person in the following short bio described as an L rather than an F\", where F (foil) is another medical occupation, e.g., physician. \n\nYou can read more details on the dataset and the annotation process in the paper Eberle et al. (2023).", "# Dataset Structure\n\nWe provide the 'standard' version of the dataset, where examples look as follows. \n\n\n\nand the newly curated subset of examples including human rationales, dubbed 'rationales', where examples look as follows.", "# Use\n\nTo load the 'standard' version of the dataset:\n\n\n\nTo load the newly curated subset of examples with human rationales:\n\n\n\n*Oliver Eberle\\*, Ilias Chalkidis\\*, Laura Cabello, Stephanie Brandl. Rather a Nurse than a Physician - Contrastive Explanations under Investigation. 2023. In the Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Singapore.*" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-cc-by-nc-sa-4.0 #medical #arxiv-2310.11906 #region-us \n", "# Dataset Description\n\nThe dataset comprises English biographies labeled with occupations and binary genders. \nThis is an occupation classification task, where bias concerning gender can be studied. \nIt includes a subset of 10,000 biographies (8k train/1k dev/1k test) targeting 5 medical occupations (psychologist, surgeon, nurse, dentist, physician), derived from De-Arteaga et al. (2019). \nWe collect and release human rationale annotations for a subset of 100 biographies in two different settings: non-contrastive and contrastive. \nIn the former, the annotators were asked to find the rationale for the question: \"Why is the person in the following short bio described as a L?\", where L is the gold label occupation, e.g., nurse. \nIn the latter, the question was \"Why is the person in the following short bio described as an L rather than an F\", where F (foil) is another medical occupation, e.g., physician. \n\nYou can read more details on the dataset and the annotation process in the paper Eberle et al. (2023).", "# Dataset Structure\n\nWe provide the 'standard' version of the dataset, where examples look as follows. \n\n\n\nand the newly curated subset of examples including human rationales, dubbed 'rationales', where examples look as follows.", "# Use\n\nTo load the 'standard' version of the dataset:\n\n\n\nTo load the newly curated subset of examples with human rationales:\n\n\n\n*Oliver Eberle\\*, Ilias Chalkidis\\*, Laura Cabello, Stephanie Brandl. Rather a Nurse than a Physician - Contrastive Explanations under Investigation. 2023. In the Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Singapore.*" ]
[ 58, 257, 57, 103 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-cc-by-nc-sa-4.0 #medical #arxiv-2310.11906 #region-us \n# Dataset Description\n\nThe dataset comprises English biographies labeled with occupations and binary genders. \nThis is an occupation classification task, where bias concerning gender can be studied. \nIt includes a subset of 10,000 biographies (8k train/1k dev/1k test) targeting 5 medical occupations (psychologist, surgeon, nurse, dentist, physician), derived from De-Arteaga et al. (2019). \nWe collect and release human rationale annotations for a subset of 100 biographies in two different settings: non-contrastive and contrastive. \nIn the former, the annotators were asked to find the rationale for the question: \"Why is the person in the following short bio described as a L?\", where L is the gold label occupation, e.g., nurse. \nIn the latter, the question was \"Why is the person in the following short bio described as an L rather than an F\", where F (foil) is another medical occupation, e.g., physician. \n\nYou can read more details on the dataset and the annotation process in the paper Eberle et al. (2023).# Dataset Structure\n\nWe provide the 'standard' version of the dataset, where examples look as follows. \n\n\n\nand the newly curated subset of examples including human rationales, dubbed 'rationales', where examples look as follows.# Use\n\nTo load the 'standard' version of the dataset:\n\n\n\nTo load the newly curated subset of examples with human rationales:\n\n\n\n*Oliver Eberle\\*, Ilias Chalkidis\\*, Laura Cabello, Stephanie Brandl. Rather a Nurse than a Physician - Contrastive Explanations under Investigation. 2023. In the Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Singapore.*" ]
45d9c80ee545ab397e1823431e1f02ea1844e50f
# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

- AST: 分科測驗 (Advanced Subjects Test; the 指考 up to ROC year 110)
- GSAT: 學科能力測驗 (General Scholastic Ability Test)
- CAP: 國中教育會考 (Comprehensive Assessment Program for Junior High School Students)

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

[More Information Needed]

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->

[More Information Needed]

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

[More Information Needed]

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

[More Information Needed]

### Annotations [optional]

<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->

#### Annotation process

<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->

[More Information Needed]

#### Who are the annotators?

<!-- This section describes the people or systems who created the annotations. -->

[More Information Needed]

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.

## Citation [optional]

<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->

[More Information Needed]

## More Information [optional]

### Evaluation

#### CAP

##### ChatGPT

Total: 199 / 389 (0.5116)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.5179   | 29 / 56         |
| mathematics   | 0.3273   | 36 / 110        |
| physics       | 0.5000   | 5 / 10          |
| chemistry     | 0.2727   | 6 / 22          |
| biology       | 0.4545   | 10 / 22         |
| earth science | 0.4000   | 4 / 10          |
| geography     | 0.5750   | 23 / 40         |
| history       | 0.8235   | 42 / 51         |
| civics        | 0.6471   | 44 / 68         |

##### GPT-4-turbo

Total: 289 / 389 (0.7429)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.8571   | 48 / 56         |
| mathematics   | 0.4000   | 44 / 110        |
| physics       | 0.7000   | 7 / 10          |
| chemistry     | 0.8182   | 18 / 22         |
| biology       | 0.9091   | 20 / 22         |
| earth science | 0.8000   | 8 / 10          |
| geography     | 0.9000   | 36 / 40         |
| history       | 0.9608   | 49 / 51         |
| civics        | 0.8676   | 59 / 68         |

##### Claude-Instant-1

Total: 214 / 389 (0.5501)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.6071   | 34 / 56         |
| mathematics   | 0.2636   | 29 / 110        |
| physics       | 0.4000   | 4 / 10          |
| chemistry     | 0.4545   | 10 / 22         |
| biology       | 0.5909   | 13 / 22         |
| earth science | 0.4000   | 4 / 10          |
| geography     | 0.6500   | 26 / 40         |
| history       | 0.8431   | 43 / 51         |
| civics        | 0.7500   | 51 / 68         |

##### Claude-2

Total: 213 / 389 (0.5476)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.6071   | 34 / 56         |
| mathematics   | 0.3727   | 41 / 110        |
| physics       | 0.6000   | 6 / 10          |
| chemistry     | 0.5000   | 11 / 22         |
| biology       | 0.6364   | 14 / 22         |
| earth science | 0.7000   | 7 / 10          |
| geography     | 0.7000   | 28 / 40         |
| history       | 0.7255   | 37 / 51         |
| civics        | 0.5147   | 35 / 68         |

#### GSAT

##### ChatGPT

Total: 180 / 387 (0.4651)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.3587   | 33 / 92         |
| mathematics   | 0.2083   | 5 / 24          |
| physics       | 0.3684   | 7 / 19          |
| chemistry     | 0.2917   | 7 / 24          |
| biology       | 0.2500   | 4 / 16          |
| earth science | 0.4211   | 8 / 19          |
| geography     | 0.5455   | 24 / 44         |
| history       | 0.6049   | 49 / 81         |
| civics        | 0.6324   | 43 / 68         |

##### GPT-4-turbo

Total: 293 / 387 (0.7571)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.7826   | 72 / 92         |
| mathematics   | 0.2500   | 6 / 24          |
| physics       | 0.7368   | 14 / 19         |
| chemistry     | 0.5417   | 13 / 24         |
| biology       | 0.6875   | 11 / 16         |
| earth science | 0.8421   | 16 / 19         |
| geography     | 0.8864   | 39 / 44         |
| history       | 0.8519   | 69 / 81         |
| civics        | 0.7794   | 53 / 68         |

##### Claude-instant-1

Total: 213 / 387 (0.5504)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.4891   | 45 / 92         |
| mathematics   | 0.2500   | 6 / 24          |
| physics       | 0.3684   | 7 / 19          |
| chemistry     | 0.3333   | 8 / 24          |
| biology       | 0.5625   | 9 / 16          |
| earth science | 0.4211   | 8 / 19          |
| geography     | 0.6818   | 30 / 44         |
| history       | 0.7160   | 58 / 81         |
| civics        | 0.6176   | 42 / 68         |

##### Claude-2

Total: 180 / 387 (0.4651)

| Subject       | Accuracy | correct / total |
|:------------- | -------- |:--------------- |
| chinese       | 0.3152   | 29 / 92         |
| mathematics   | 0.2083   | 5 / 24          |
| physics       | 0.3684   | 7 / 19          |
| chemistry     | 0.2917   | 7 / 24          |
| biology       | 0.1875   | 3 / 16          |
| earth science | 0.2632   | 5 / 19          |
| geography     | 0.6818   | 30 / 44         |
| history       | 0.6914   | 56 / 81         |
| civics        | 0.5588   | 38 / 68         |

#### AST

##### ChatGPT

Total: 193 / 405 (0.4765)

| Subject     | Accuracy | correct / total |
|:----------- | -------- |:--------------- |
| chinese     | 0.4365   | 55 / 126        |
| mathematics | 0.1500   | 3 / 20          |
| physics     | 0.2368   | 9 / 38          |
| chemistry   | 0.2759   | 8 / 29          |
| biology     | 0.7500   | 27 / 36         |
| geography   | 0.5094   | 27 / 53         |
| history     | 0.7843   | 40 / 51         |
| civics      | 0.4615   | 24 / 52         |

##### GPT-4-turbo

Total: 280 / 405 (0.6914)

| Subject     | Accuracy | correct / total |
|:----------- | -------- |:--------------- |
| chinese     | 0.7302   | 92 / 126        |
| mathematics | 0.1500   | 3 / 20          |
| physics     | 0.5263   | 20 / 38         |
| chemistry   | 0.3103   | 9 / 29          |
| biology     | 0.8889   | 32 / 36         |
| geography   | 0.6981   | 37 / 53         |
| history     | 0.9804   | 50 / 51         |
| civics      | 0.7115   | 37 / 52         |

##### Claude-instant-1

Total: 219 / 405 (0.5407)

| Subject     | Accuracy | correct / total |
|:----------- | -------- |:--------------- |
| chinese     | 0.5635   | 71 / 126        |
| mathematics | 0.3500   | 7 / 20          |
| physics     | 0.3947   | 15 / 38         |
| chemistry   | 0.1724   | 5 / 29          |
| biology     | 0.6389   | 23 / 36         |
| geography   | 0.6038   | 32 / 53         |
| history     | 0.6863   | 35 / 51         |
| civics      | 0.5962   | 31 / 52         |

##### Claude-2

Total: 185 / 405 (0.4568)

| Subject     | Accuracy | correct / total |
|:----------- | -------- |:--------------- |
| chinese     | 0.4365   | 55 / 126        |
| mathematics | 0.0500   | 1 / 20          |
| physics     | 0.3421   | 13 / 38         |
| chemistry   | 0.1034   | 3 / 29          |
| biology     | 0.4444   | 16 / 36         |
| geography   | 0.6604   | 35 / 53         |
| history     | 0.7255   | 37 / 51         |
| civics      | 0.4808   | 25 / 52         |

## Dataset Card Authors [optional]

[More Information Needed]

## Dataset Card Contact

[More Information Needed]
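A minimal sketch of loading one subject configuration with the `datasets` library; the repo id, config names, and the `dev`/`test` splits all follow the repository metadata above (treating `dev` as few-shot exemplars is an assumption):

```python
from datasets import load_dataset

# "AST_chinese" is one of the config names listed in this repo's metadata.
tmlu = load_dataset("miulab/tmlu", "AST_chinese")
print(tmlu["dev"][0])   # small split, presumably few-shot exemplars
print(tmlu["test"][0])  # evaluation questions
```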
miulab/tmlu
[ "task_categories:question-answering", "task_categories:text-classification", "size_categories:1K<n<10K", "language:zh", "region:us" ]
2023-10-09T10:15:13+00:00
{"language": ["zh"], "size_categories": ["1K<n<10K"], "task_categories": ["question-answering", "text-classification"], "pretty_name": "TMLU", "configs": [{"config_name": "AST_chinese", "data_files": [{"split": "test", "path": "AST_chinese_test.jsonl"}, {"split": "dev", "path": "AST_chinese_dev.jsonl"}]}, {"config_name": "AST_mathematics", "data_files": [{"split": "test", "path": "AST_mathematics_test.jsonl"}, {"split": "dev", "path": "AST_mathematics_dev.jsonl"}]}, {"config_name": "AST_biology", "data_files": [{"split": "test", "path": "AST_biology_test.jsonl"}, {"split": "dev", "path": "AST_biology_dev.jsonl"}]}, {"config_name": "AST_chemistry", "data_files": [{"split": "test", "path": "AST_chemistry_test.jsonl"}, {"split": "dev", "path": "AST_chemistry_dev.jsonl"}]}, {"config_name": "AST_physics", "data_files": [{"split": "test", "path": "AST_physics_test.jsonl"}, {"split": "dev", "path": "AST_physics_dev.jsonl"}]}, {"config_name": "AST_civics", "data_files": [{"split": "test", "path": "AST_civics_test.jsonl"}, {"split": "dev", "path": "AST_civics_dev.jsonl"}]}, {"config_name": "AST_geography", "data_files": [{"split": "test", "path": "AST_geography_test.jsonl"}, {"split": "dev", "path": "AST_geography_dev.jsonl"}]}, {"config_name": "AST_history", "data_files": [{"split": "test", "path": "AST_history_test.jsonl"}, {"split": "dev", "path": "AST_history_dev.jsonl"}]}, {"config_name": "GSAT_chinese", "data_files": [{"split": "test", "path": "GSAT_chinese_test.jsonl"}, {"split": "dev", "path": "GSAT_chinese_dev.jsonl"}]}, {"config_name": "GSAT_chemistry", "data_files": [{"split": "test", "path": "GSAT_chemistry_test.jsonl"}, {"split": "dev", "path": "GSAT_chemistry_dev.jsonl"}]}, {"config_name": "GSAT_biology", "data_files": [{"split": "test", "path": "GSAT_biology_test.jsonl"}, {"split": "dev", "path": "GSAT_biology_dev.jsonl"}]}, {"config_name": "GSAT_physics", "data_files": [{"split": "test", "path": "GSAT_physics_test.jsonl"}, {"split": "dev", "path": "GSAT_physics_dev.jsonl"}]}, {"config_name": "GSAT_earth_science", "data_files": [{"split": "test", "path": "GSAT_earth_science_test.jsonl"}, {"split": "dev", "path": "GSAT_earth_science_dev.jsonl"}]}, {"config_name": "GSAT_mathematics", "data_files": [{"split": "test", "path": "GSAT_mathematics_test.jsonl"}, {"split": "dev", "path": "GSAT_mathematics_dev.jsonl"}]}, {"config_name": "GSAT_geography", "data_files": [{"split": "test", "path": "GSAT_geography_test.jsonl"}, {"split": "dev", "path": "GSAT_geography_dev.jsonl"}]}, {"config_name": "GSAT_history", "data_files": [{"split": "test", "path": "GSAT_history_test.jsonl"}, {"split": "dev", "path": "GSAT_history_dev.jsonl"}]}, {"config_name": "GSAT_civics", "data_files": [{"split": "test", "path": "GSAT_civics_test.jsonl"}, {"split": "dev", "path": "GSAT_civics_dev.jsonl"}]}, {"config_name": "CAP_mathematics", "data_files": [{"split": "test", "path": "CAP_mathematics_test.jsonl"}, {"split": "dev", "path": "CAP_mathematics_dev.jsonl"}]}, {"config_name": "CAP_biology", "data_files": [{"split": "test", "path": "CAP_biology_test.jsonl"}, {"split": "dev", "path": "CAP_biology_dev.jsonl"}]}, {"config_name": "CAP_physics", "data_files": [{"split": "test", "path": "CAP_physics_test.jsonl"}, {"split": "dev", "path": "CAP_physics_dev.jsonl"}]}, {"config_name": "CAP_chemistry", "data_files": [{"split": "test", "path": "CAP_chemistry_test.jsonl"}, {"split": "dev", "path": "CAP_chemistry_dev.jsonl"}]}, {"config_name": "CAP_earth_science", "data_files": [{"split": "test", "path": 
"CAP_earth_science_test.jsonl"}, {"split": "dev", "path": "CAP_earth_science_dev.jsonl"}]}, {"config_name": "CAP_civics", "data_files": [{"split": "test", "path": "CAP_civics_test.jsonl"}, {"split": "dev", "path": "CAP_civics_dev.jsonl"}]}, {"config_name": "CAP_history", "data_files": [{"split": "test", "path": "CAP_history_test.jsonl"}, {"split": "dev", "path": "CAP_history_dev.jsonl"}]}, {"config_name": "CAP_geography", "data_files": [{"split": "test", "path": "CAP_geography_test.jsonl"}, {"split": "dev", "path": "CAP_geography_dev.jsonl"}]}, {"config_name": "CAP_chinese", "data_files": [{"split": "test", "path": "CAP_chinese_test.jsonl"}, {"split": "dev", "path": "CAP_chinese_dev.jsonl"}]}, {"config_name": "driving_rule", "data_files": [{"split": "test", "path": "driving_rule_test.jsonl"}, {"split": "dev", "path": "driving_rule_dev.jsonl"}]}, {"config_name": "basic_traditional_chinese_medicine", "data_files": [{"split": "test", "path": "basic_traditional_chinese_medicine_test.jsonl"}, {"split": "dev", "path": "basic_traditional_chinese_medicine_dev.jsonl"}]}, {"config_name": "clinical_traditional_chinese_medicine", "data_files": [{"split": "test", "path": "clinical_traditional_chinese_medicine_test.jsonl"}, {"split": "dev", "path": "clinical_traditional_chinese_medicine_dev.jsonl"}]}, {"config_name": "lawyer_qualification", "data_files": [{"split": "test", "path": "lawyer_qualification_test.jsonl"}, {"split": "dev", "path": "lawyer_qualification_dev.jsonl"}]}, {"config_name": "nutritionist", "data_files": [{"split": "test", "path": "nutritionist_test.jsonl"}, {"split": "dev", "path": "nutritionist_dev.jsonl"}]}, {"config_name": "tour_leader", "data_files": [{"split": "test", "path": "tour_leader_test.jsonl"}, {"split": "dev", "path": "tour_leader_dev.jsonl"}]}, {"config_name": "tour_guide", "data_files": [{"split": "test", "path": "tour_guide_test.jsonl"}, {"split": "dev", "path": "tour_guide_dev.jsonl"}]}, {"config_name": "taiwan_tourist_resources", "data_files": [{"split": "test", "path": "taiwan_tourist_resources_test.jsonl"}, {"split": "dev", "path": "taiwan_tourist_resources_dev.jsonl"}]}, {"config_name": "clinical_psychologist", "data_files": [{"split": "test", "path": "clinical_psychologist_test.jsonl"}, {"split": "dev", "path": "clinical_psychologist_dev.jsonl"}]}, {"config_name": "teacher_qualification", "data_files": [{"split": "test", "path": "teacher_qualification_test.jsonl"}, {"split": "dev", "path": "teacher_qualification_dev.jsonl"}]}, {"config_name": "accountant", "data_files": [{"split": "test", "path": "accountant_test.jsonl"}, {"split": "dev", "path": "accountant_dev.jsonl"}]}]}
2024-02-11T10:49:23+00:00
[]
[ "zh" ]
TAGS #task_categories-question-answering #task_categories-text-classification #size_categories-1K<n<10K #language-Chinese #region-us
Dataset Card for Dataset Name ============================= This dataset card aims to be a base template for new datasets. It has been generated using this raw template. Dataset Details --------------- * AST: 分科測驗(110前指考) * GSAT: 學科能力測驗 * CAP: 國中教育會考 ### Dataset Description * Curated by: * Funded by [optional]: * Shared by [optional]: * Language(s) (NLP): * License: ### Dataset Sources [optional] * Repository: * Paper [optional]: * Demo [optional]: Uses ---- ### Direct Use ### Out-of-Scope Use Dataset Structure ----------------- Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information Bias, Risks, and Limitations ---------------------------- ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: Glossary [optional] ------------------- More Information [optional] --------------------------- ### Evaluation #### CAP ##### ChatGPT Total: 199 / 389 (0.5116) ##### GPT-4-turbo Total: 289 / 389 (0.7429) ##### Claude-Instant-1 Total: 214 / 389 (0.5501) ##### Claude-2 Total: 213 / 389 (0.5476) #### GSAT ##### ChatGPT Total: 180 / 387 (0.4651) ##### GPT-4-turbo Total: 293 / 387 (0.7571) ##### Claude-instant-1 Total: 213 / 387 (0.5504) ##### Claude-2 Total: 180 / 387 (0.4651) #### AST ##### ChatGPT Total: 193 / 405 (0.4765) ##### GPT-4-turbo Total: 280 / 405 (0.6914) ##### Claude-instant-1 Total: 219 / 405 (0.5407) ##### Claude-2 Total: 185 / 405 (0.4568) Dataset Card Authors [optional] ------------------------------- Dataset Card Contact --------------------
[ "### Dataset Description\n\n\n* Curated by:\n* Funded by [optional]:\n* Shared by [optional]:\n* Language(s) (NLP):\n* License:", "### Dataset Sources [optional]\n\n\n* Repository:\n* Paper [optional]:\n* Demo [optional]:\n\n\nUses\n----", "### Direct Use", "### Out-of-Scope Use\n\n\nDataset Structure\n-----------------\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information\n\n\nBias, Risks, and Limitations\n----------------------------", "### Recommendations\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n\n[optional]\n\n\nBibTeX:\n\n\nAPA:\n\n\nGlossary [optional]\n-------------------\n\n\nMore Information [optional]\n---------------------------", "### Evaluation", "#### CAP", "##### ChatGPT\n\n\nTotal: 199 / 389 (0.5116)", "##### GPT-4-turbo\n\n\nTotal: 289 / 389 (0.7429)", "##### Claude-Instant-1\n\n\nTotal: 214 / 389 (0.5501)", "##### Claude-2\n\n\nTotal: 213 / 389 (0.5476)", "#### GSAT", "##### ChatGPT\n\n\nTotal: 180 / 387 (0.4651)", "##### GPT-4-turbo\n\n\nTotal: 293 / 387 (0.7571)", "##### Claude-instant-1\n\n\nTotal: 213 / 387 (0.5504)", "##### Claude-2\n\n\nTotal: 180 / 387 (0.4651)", "#### AST", "##### ChatGPT\n\n\nTotal: 193 / 405 (0.4765)", "##### GPT-4-turbo\n\n\nTotal: 280 / 405 (0.6914)", "##### Claude-instant-1\n\n\nTotal: 219 / 405 (0.5407)", "##### Claude-2\n\n\nTotal: 185 / 405 (0.4568)\n\n\n\nDataset Card Authors [optional]\n-------------------------------\n\n\nDataset Card Contact\n--------------------" ]
[ "TAGS\n#task_categories-question-answering #task_categories-text-classification #size_categories-1K<n<10K #language-Chinese #region-us \n", "### Dataset Description\n\n\n* Curated by:\n* Funded by [optional]:\n* Shared by [optional]:\n* Language(s) (NLP):\n* License:", "### Dataset Sources [optional]\n\n\n* Repository:\n* Paper [optional]:\n* Demo [optional]:\n\n\nUses\n----", "### Direct Use", "### Out-of-Scope Use\n\n\nDataset Structure\n-----------------\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information\n\n\nBias, Risks, and Limitations\n----------------------------", "### Recommendations\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n\n[optional]\n\n\nBibTeX:\n\n\nAPA:\n\n\nGlossary [optional]\n-------------------\n\n\nMore Information [optional]\n---------------------------", "### Evaluation", "#### CAP", "##### ChatGPT\n\n\nTotal: 199 / 389 (0.5116)", "##### GPT-4-turbo\n\n\nTotal: 289 / 389 (0.7429)", "##### Claude-Instant-1\n\n\nTotal: 214 / 389 (0.5501)", "##### Claude-2\n\n\nTotal: 213 / 389 (0.5476)", "#### GSAT", "##### ChatGPT\n\n\nTotal: 180 / 387 (0.4651)", "##### GPT-4-turbo\n\n\nTotal: 293 / 387 (0.7571)", "##### Claude-instant-1\n\n\nTotal: 213 / 387 (0.5504)", "##### Claude-2\n\n\nTotal: 180 / 387 (0.4651)", "#### AST", "##### ChatGPT\n\n\nTotal: 193 / 405 (0.4765)", "##### GPT-4-turbo\n\n\nTotal: 280 / 405 (0.6914)", "##### Claude-instant-1\n\n\nTotal: 219 / 405 (0.5407)", "##### Claude-2\n\n\nTotal: 185 / 405 (0.4568)\n\n\n\nDataset Card Authors [optional]\n-------------------------------\n\n\nDataset Card Contact\n--------------------" ]
[ 46, 40, 32, 4, 22, 7, 4, 7, 10, 9, 5, 9, 20, 64, 4, 3, 15, 19, 18, 15, 4, 15, 19, 17, 15, 4, 15, 18, 17, 34 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-text-classification #size_categories-1K<n<10K #language-Chinese #region-us \n### Dataset Description\n\n\n* Curated by:\n* Funded by [optional]:\n* Shared by [optional]:\n* Language(s) (NLP):\n* License:### Dataset Sources [optional]\n\n\n* Repository:\n* Paper [optional]:\n* Demo [optional]:\n\n\nUses\n----### Direct Use### Out-of-Scope Use\n\n\nDataset Structure\n-----------------\n\n\nDataset Creation\n----------------### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information\n\n\nBias, Risks, and Limitations\n----------------------------### Recommendations\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n\n[optional]\n\n\nBibTeX:\n\n\nAPA:\n\n\nGlossary [optional]\n-------------------\n\n\nMore Information [optional]\n---------------------------### Evaluation#### CAP##### ChatGPT\n\n\nTotal: 199 / 389 (0.5116)##### GPT-4-turbo\n\n\nTotal: 289 / 389 (0.7429)##### Claude-Instant-1\n\n\nTotal: 214 / 389 (0.5501)##### Claude-2\n\n\nTotal: 213 / 389 (0.5476)#### GSAT##### ChatGPT\n\n\nTotal: 180 / 387 (0.4651)##### GPT-4-turbo\n\n\nTotal: 293 / 387 (0.7571)##### Claude-instant-1\n\n\nTotal: 213 / 387 (0.5504)##### Claude-2\n\n\nTotal: 180 / 387 (0.4651)#### AST##### ChatGPT\n\n\nTotal: 193 / 405 (0.4765)##### GPT-4-turbo\n\n\nTotal: 280 / 405 (0.6914)##### Claude-instant-1\n\n\nTotal: 219 / 405 (0.5407)" ]
96e3058cee0cebc396f6a592ac28ed484da61911
<div align="center"> <img src="https://github.com/Happylkx/InstructCoder/raw/main/docs/logo.png"> </div> <div align="center"> <a href="https://github.com/qishenghu/CodeInstruct/blob/main/CodeInstruct.pdf">Paper</a> | <a href="https://github.com/qishenghu/CodeInstruct">Code</a> | <a href="https://happylkx.github.io/InstructCoder/">Blog</a> <!-- <a href="https://blog.nus.edu.sg/kaixinli/2023/05/23/codeinstruct/">Blog</a> --> </div> <!-- | [Checkpoints](link_to_checkpoints) --> # InstructCoder (CodeInstruct): Empowering Language Models to Edit Code ## Updates - May 23, 2023: Paper, code and data released. ## Overview InstructCoder is the first dataset designed to adapt LLMs for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios, generated by ChatGPT. LLaMA-33B finetuned on InstructCoder performs on par with ChatGPT on a real-world test set derived from GitHub commits. ![](demo.png) In the ever-evolving world of software development, efficient and effective code editing plays a pivotal role. As the demands for faster development cycles and increased productivity continue to rise, harnessing the power of Large Language Models in code editing has emerged as a game-changer. This project aims to revolutionize code editing, empowering developers with intelligent tools and techniques. Though the community has studied extensively on coding with LLMs, our experiments demonstrate that current LLMs still struggle for code editing. To this end, we curate a code editing dataset, dubbed InstructCoder, for improving and evaluating code editing abilities of LLMs. InstructCoder is an instructional dataset containing diverse code-editing tasks, covering 20 scenarios where code editing may be performed. [The dataset is released here.](https://github.com/Happylkx/InstructCoder) Our empirical experiments reveal that open-source LLMs display notable gains in code editing abilities after finetuning on InstructCoder. Some qualitative examples are provided. ## Data Collection InstructCoder is systematically collected through an iterative process. Initial seed tasks are selected from GitHub commits. Inspired by Self-Instruct, we prompt ChatGPT to bootstrap new instructions. Then, plausible scenarios where the filtered instructions may be used are generated. Finally, corresponding code input and output are obtained conditioned on both the instruction and scenario. High-quality samples are manually selected and recurrently added to the task pool for further generation. ## Results The largest model fine-tuned, LLaMA-33B, performs on par with ChatGPT, with 79.3% accuracy on the test set. Some qualitative examples generated by our model are shown here. ## Release We are planning to release the following assets: - [x] Full dataset - [x] Source Code <!-- The LoRA checkpoints are available [here](link_to_checkpoints). --> ## Qualitative Examples In this section, we showcase three qualitative examples generated by the LLaMA-33B model. 1. **Instruction:** Transform the code into a class method called ‘calculate_distance’ that calculates the distance between two points in 3D space. 

**Input code:**

```python
import math

def distance(point1, point2):
    x1, y1, z1 = point1
    x2, y2, z2 = point2
    dx = x2 - x1
    dy = y2 - y1
    dz = z2 - z1
    return math.sqrt(dx**2 + dy**2 + dz**2)

point1 = (0, 0, 0)
point2 = (1, 1, 1)
print(distance(point1, point2))
```

**Output code:**

```python
import math

class Point:
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def calculate_distance(self, other_point):
        dx = other_point.x - self.x
        dy = other_point.y - self.y
        dz = other_point.z - self.z
        return math.sqrt(dx**2 + dy**2 + dz**2)

point1 = Point(0, 0, 0)
point2 = Point(1, 1, 1)
print(point1.calculate_distance(point2))
```

2. **Instruction:** Integrate error handling into the code that performs batch processing of large datasets, including job failures or incomplete runs.

**Input code:**

```python
import pandas as pd

def process_orders(orders_file, shipping_file, inventory_file):
    orders = pd.read_csv(orders_file)
    shipping = pd.read_csv(shipping_file)
    inventory = pd.read_csv(inventory_file)

    # Perform data processing and analysis here...

    results = {"success": True, "message": "Batch processing completed successfully."}
    return results

orders_file = "orders.csv"
shipping_file = "shipping.csv"
inventory_file = "inventory.csv"
results = process_orders(orders_file, shipping_file, inventory_file)
print(results)
```

**Output code:**

```python
import pandas as pd
import logging

logging.basicConfig(filename='batch_processing.log', level=logging.ERROR)

def process_orders(orders_file, shipping_file, inventory_file):
    try:
        orders = pd.read_csv(orders_file)
        shipping = pd.read_csv(shipping_file)
        inventory = pd.read_csv(inventory_file)

        # Perform data processing and analysis here...

        results = {"success": True, "message": "Batch processing completed successfully."}
        return results
    except Exception as e:
        logging.error(f"Error processing batch: {e}")
        results = {"success": False, "message": "Batch processing failed. Please check the input files for any issues."}
        return results

orders_file = "orders.csv"
shipping_file = "shipping.csv"
inventory_file = "inventory.csv"
results = process_orders(orders_file, shipping_file, inventory_file)
print(results)
```

3. **Instruction:** Refactor any code that uses ‘exec’ or ‘eval’ for security reasons.

**Input code:**

```python
game_data = {
    "score": 0,
    "level": 1
}

def execute_script(script):
    exec(script)

script = """
game_data["score"] += 10
if game_data["score"] >= 100:
    game_data["level"] += 1
"""

execute_script(script)
print(game_data)
```

**Output code:**

```python
game_data = {
    "score": 0,
    "level": 1
}

def execute_script(script):
    compiled_script = compile(script, "<string>", "exec")
    exec(compiled_script, game_data)

script = """
game_data["score"] += 10
if game_data["score"] >= 100:
    game_data["level"] += 1
"""

execute_script(script)
print(game_data)
```

Now the script is not able to access the data in `game_data`.

## Limitations and Safety Concerns

By recognizing and addressing limitations and safety concerns, we hope developers can foster responsible AI deployment and use, promoting a more ethical, equitable, and secure future for large language models in code production.

1. **License:** You must ensure compliance with the corresponding licenses of the LLMs. It is important to thoroughly review the licenses and make sure that your usage is in accordance with their terms. For instance, you are not allowed to use LLaMA commercially, for it is released under a noncommercial license.

2. **Incomplete or Imperfect Knowledge:** LLMs are trained on vast amounts of data, which may not always be up-to-date or entirely accurate. For example, the APIs of a library may change over time. Consequently, the information provided by the models could be outdated, inaccurate, or even misleading in some instances.

3. **Overuse and Dependency:** Users might incorrectly interpret or rely too heavily on the outputs generated by large language models. It is crucial to provide proper guidance and promote an understanding of the model’s limitations, encouraging users to critically assess and verify the information or suggestions provided. Please make sure to check the models’ generations before using them. Overreliance on large language models could lead to complacency, potentially causing users to undervalue human skills such as creativity and critical thinking. We encourage users to use AI as a tool to supplement, rather than replace, human input and judgment.

4. **Malicious Use:** There is a risk that malicious actors might use the tools for nefarious purposes, such as generating malicious software. It is important to monitor the use and deployment of these models, track and report abuse, and develop countermeasures to address potential malicious activity.

5. **Bias and Discrimination:** Language models can inherit societal biases present in their training data, possibly leading to discriminatory or biased generations. Though our dataset is unlikely to contain such toxic data, biases may still appear in responses because of the base LLMs.

## Citation
Feel free to cite our work if you find it interesting or use the data:

```plain
@misc{2023instructcoder,
  title={InstructCoder: Empowering Language Models for Code Editing},
  author={Qisheng Hu and Kaixin Li and Xu Zhao and Yuxi Xie and Tiedong Liu and Hui Chen and Qizhe Xie and Junxian He},
  year={2023},
  eprint={2310.20329},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```

## Conclusion
The integration of AI into code editing represents a significant milestone in the evolution of software development. By leveraging AI’s capabilities in understanding code semantics, patterns, and best practices, developers can unlock new levels of productivity, code quality, and efficiency. The project we’ve explored here demonstrates the immense potential of intelligent code editing tools. As the software development landscape continues to evolve, embracing AI is poised to become standard practice, setting the stage for a future where developers can focus more on creativity and problem-solving while AI handles the mundane aspects of coding.
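
## Loading the Data

A minimal sketch of loading the dataset with the Hugging Face `datasets` library, using the repo ID from this record. The split name and the column names `instruction`, `input`, and `output` are assumptions inferred from the "instruction-input-output triplets" description above, not confirmed by the card, so verify them against the loaded features before use.

```python
# Hedged sketch: load InstructCoder and inspect its schema.
from datasets import load_dataset

ds = load_dataset("happylkx/InstructCoder", split="train")  # split name assumed

print(ds.features)  # check the real schema before relying on column names
example = ds[0]
# Assumed fields for one editing task (verify against ds.features):
#   example["instruction"]  - the natural-language edit request
#   example["input"]        - the code before editing
#   example["output"]       - the code after editing
print(example)
```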
happylkx/InstructCoder
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "code", "arxiv:2310.20329", "region:us" ]
2023-10-09T10:21:14+00:00
{"language": ["en"], "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "instruct_coder", "tags": ["code"]}
2023-11-09T08:59:57+00:00
[ "2310.20329" ]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #language-English #code #arxiv-2310.20329 #region-us
<div align="center"> <img src="URL </div> <div align="center"> <a href="URL | <a href="URL | <a href="URL </div> # InstructCoder (CodeInstruct): Empowering Language Models to Edit Code ## Updates - May 23, 2023: Paper, code and data released. ## Overview InstructCoder is the first dataset designed to adapt LLMs for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios, generated by ChatGPT. LLaMA-33B finetuned on InstructCoder performs on par with ChatGPT on a real-world test set derived from GitHub commits. ![](URL) In the ever-evolving world of software development, efficient and effective code editing plays a pivotal role. As the demands for faster development cycles and increased productivity continue to rise, harnessing the power of Large Language Models in code editing has emerged as a game-changer. This project aims to revolutionize code editing, empowering developers with intelligent tools and techniques. Though the community has studied extensively on coding with LLMs, our experiments demonstrate that current LLMs still struggle for code editing. To this end, we curate a code editing dataset, dubbed InstructCoder, for improving and evaluating code editing abilities of LLMs. InstructCoder is an instructional dataset containing diverse code-editing tasks, covering 20 scenarios where code editing may be performed. The dataset is released here. Our empirical experiments reveal that open-source LLMs display notable gains in code editing abilities after finetuning on InstructCoder. Some qualitative examples are provided. ## Data Collection InstructCoder is systematically collected through an iterative process. Initial seed tasks are selected from GitHub commits. Inspired by Self-Instruct, we prompt ChatGPT to bootstrap new instructions. Then, plausible scenarios where the filtered instructions may be used are generated. Finally, corresponding code input and output are obtained conditioned on both the instruction and scenario. High-quality samples are manually selected and recurrently added to the task pool for further generation. ## Results The largest model fine-tuned, LLaMA-33B, performs on par with ChatGPT, with 79.3% accuracy on the test set. Some qualitative examples generated by our model are shown here. ## Release We are planning to release the following assets: - [x] Full dataset - [x] Source Code ## Qualitative Examples In this section, we showcase three qualitative examples generated by the LLaMA-33B model. 1. Instruction: Transform the code into a class method called ‘calculate_distance’ that calculates the distance between two points in 3D space. Input code: Output code: 2. Instruction: Integrate error handling into the code that performs batch processing of large datasets, including job failures or incomplete runs. Input code: Output code: 3. Instruction: Refactor any code that uses ‘exec’ or ‘eval’ for security reasons. Input code: Output code: Now the script is not able to access the data in game_data. ## Limitations and Safety Concerns By recognizing and addressing limitations and safety concerns, we hope developers can foster responsible AI deployment and use, promoting a more ethical, equitable, and secure future for large language models in code production. 1. License: You must ensure compliance with the corresponding licenses of the LLMs. It is important to thoroughly review the licenses and make sure that your usage is in accordance with their terms. 
For instance, you are not allowed to use LLaMA commercially, for it is released under a noncommercial license. 2. Incomplete or Imperfect Knowledge: LLMs are trained on vast amounts of data, which may not always be up-to-date or entirely accurate. For example, the APIs of a library may change over time. Consequently, the information provided by the models could be outdated, inaccurate, or even misleading in some instances. 3. Overuse and Dependency: Users might incorrectly interpret or rely too heavily on the outputs generated by large language models. It is crucial to provide proper guidance and promote an understanding of the model’s limitations, encouraging users to critically assess and verify the information or suggestions provided. Please make sure to check the generation of the models before using them. Overreliance on large language models could lead to complacency, potentially causing users to undervalue human intelligence, such as creativity and critical thinking. We encourage users to use AI as a tool to supplement, rather than replace, human input and judgment. 4. Malicious Use: There is a risk that malicious actors might use the tools for nefarious purposes, such as generating malicious software. It is important to monitor the use and deployment of these models, track and report abuse, and develop countermeasures to address potential malicious activity. 5. Bias and Discrimination: Language models can inherit societal biases present in their training data, possibly leading to discriminatory or biased generations. Though our dataset is not likely to contain such toxic data, they may appear in the responses because of the base LLMs. Feel free to cite our work if you find it interesting or use the data: ## Conclusion The integration of AI into code editing represents a significant milestone in the evolution of software development. By leveraging AI’s capabilities in understanding code semantics, patterns, and best practices, developers can unlock new levels of productivity, code quality, and efficiency. This project we’ve explored demonstrates the immense potential of intelligent code editing tools. As the software development landscape continues to evolve, embracing AI is poised to become a standard practice, and sets the stage for a future where developers can focus more on creativity and problem-solving, while AI handles the mundane aspects of coding.
[ "# InstructCoder (CodeInstruct): Empowering Language Models to Edit Code", "## Updates\n- May 23, 2023: Paper, code and data released.", "## Overview\nInstructCoder is the first dataset designed to adapt LLMs for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios, generated by ChatGPT. LLaMA-33B finetuned on InstructCoder performs on par with ChatGPT on a real-world test set derived from GitHub commits.\n\n![](URL)\n\nIn the ever-evolving world of software development, efficient and effective code editing plays a pivotal role. As the demands for faster development cycles and increased productivity continue to rise, harnessing the power of Large Language Models in code editing has emerged as a game-changer. This project aims to revolutionize code editing, empowering developers with intelligent tools and techniques.\n\nThough the community has studied extensively on coding with LLMs, our experiments demonstrate that current LLMs still struggle for code editing. To this end, we curate a code editing dataset, dubbed InstructCoder, for improving and evaluating code editing abilities of LLMs. InstructCoder is an instructional dataset containing diverse code-editing tasks, covering 20 scenarios where code editing may be performed. The dataset is released here. Our empirical experiments reveal that open-source LLMs display notable gains in code editing abilities after finetuning on InstructCoder. Some qualitative examples are provided.", "## Data Collection\nInstructCoder is systematically collected through an iterative process. Initial seed tasks are selected from GitHub commits. Inspired by Self-Instruct, we prompt ChatGPT to bootstrap new instructions. Then, plausible scenarios where the filtered instructions may be used are generated. Finally, corresponding code input and output are obtained conditioned on both the instruction and scenario. High-quality samples are manually selected and recurrently added to the task pool for further generation.", "## Results\nThe largest model fine-tuned, LLaMA-33B, performs on par with ChatGPT, with 79.3% accuracy on the test set. Some qualitative examples generated by our model are shown here.", "## Release\nWe are planning to release the following assets:\n\n- [x] Full dataset\n- [x] Source Code", "## Qualitative Examples\nIn this section, we showcase three qualitative examples generated by the LLaMA-33B model.\n\n1. Instruction: Transform the code into a class method called ‘calculate_distance’ that calculates the distance between two points in 3D space.\n\nInput code:\n\n\nOutput code:\n\n\n\n2. Instruction: Integrate error handling into the code that performs batch processing of large datasets, including job failures or incomplete runs.\nInput code:\n\n\n\nOutput code:\n\n\n\n3. Instruction: Refactor any code that uses ‘exec’ or ‘eval’ for security reasons.\nInput code:\n\n\nOutput code:\n\n\n\n\nNow the script is not able to access the data in game_data.", "## Limitations and Safety Concerns\n\nBy recognizing and addressing limitations and safety concerns, we hope developers can foster responsible AI deployment and use, promoting a more ethical, equitable, and secure future for large language models in code production.\n\n1. License: You must ensure compliance with the corresponding licenses of the LLMs. It is important to thoroughly review the licenses and make sure that your usage is in accordance with their terms. 
For instance, you are not allowed to use LLaMA commercially, for it is released under a noncommercial license.\n\n2. Incomplete or Imperfect Knowledge: LLMs are trained on vast amounts of data, which may not always be up-to-date or entirely accurate. For example, the APIs of a library may change over time. Consequently, the information provided by the models could be outdated, inaccurate, or even misleading in some instances.\n\n3. Overuse and Dependency: Users might incorrectly interpret or rely too heavily on the outputs generated by large language models. It is crucial to provide proper guidance and promote an understanding of the model’s limitations, encouraging users to critically assess and verify the information or suggestions provided. Please make sure to check the generation of the models before using them.\n\nOverreliance on large language models could lead to complacency, potentially causing users to undervalue human intelligence, such as creativity and critical thinking. We encourage users to use AI as a tool to supplement, rather than replace, human input and judgment.\n\n4. Malicious Use: There is a risk that malicious actors might use the tools for nefarious purposes, such as generating malicious software. It is important to monitor the use and deployment of these models, track and report abuse, and develop countermeasures to address potential malicious activity.\n\n5. Bias and Discrimination: Language models can inherit societal biases present in their training data, possibly leading to discriminatory or biased generations. Though our dataset is not likely to contain such toxic data, they may appear in the responses because of the base LLMs.\n\nFeel free to cite our work if you find it interesting or use the data:", "## Conclusion\nThe integration of AI into code editing represents a significant milestone in the evolution of software development. By leveraging AI’s capabilities in understanding code semantics, patterns, and best practices, developers can unlock new levels of productivity, code quality, and efficiency. This project we’ve explored demonstrates the immense potential of intelligent code editing tools. As the software development landscape continues to evolve, embracing AI is poised to become a standard practice, and sets the stage for a future where developers can focus more on creativity and problem-solving, while AI handles the mundane aspects of coding." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #code #arxiv-2310.20329 #region-us \n", "# InstructCoder (CodeInstruct): Empowering Language Models to Edit Code", "## Updates\n- May 23, 2023: Paper, code and data released.", "## Overview\nInstructCoder is the first dataset designed to adapt LLMs for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios, generated by ChatGPT. LLaMA-33B finetuned on InstructCoder performs on par with ChatGPT on a real-world test set derived from GitHub commits.\n\n![](URL)\n\nIn the ever-evolving world of software development, efficient and effective code editing plays a pivotal role. As the demands for faster development cycles and increased productivity continue to rise, harnessing the power of Large Language Models in code editing has emerged as a game-changer. This project aims to revolutionize code editing, empowering developers with intelligent tools and techniques.\n\nThough the community has studied extensively on coding with LLMs, our experiments demonstrate that current LLMs still struggle for code editing. To this end, we curate a code editing dataset, dubbed InstructCoder, for improving and evaluating code editing abilities of LLMs. InstructCoder is an instructional dataset containing diverse code-editing tasks, covering 20 scenarios where code editing may be performed. The dataset is released here. Our empirical experiments reveal that open-source LLMs display notable gains in code editing abilities after finetuning on InstructCoder. Some qualitative examples are provided.", "## Data Collection\nInstructCoder is systematically collected through an iterative process. Initial seed tasks are selected from GitHub commits. Inspired by Self-Instruct, we prompt ChatGPT to bootstrap new instructions. Then, plausible scenarios where the filtered instructions may be used are generated. Finally, corresponding code input and output are obtained conditioned on both the instruction and scenario. High-quality samples are manually selected and recurrently added to the task pool for further generation.", "## Results\nThe largest model fine-tuned, LLaMA-33B, performs on par with ChatGPT, with 79.3% accuracy on the test set. Some qualitative examples generated by our model are shown here.", "## Release\nWe are planning to release the following assets:\n\n- [x] Full dataset\n- [x] Source Code", "## Qualitative Examples\nIn this section, we showcase three qualitative examples generated by the LLaMA-33B model.\n\n1. Instruction: Transform the code into a class method called ‘calculate_distance’ that calculates the distance between two points in 3D space.\n\nInput code:\n\n\nOutput code:\n\n\n\n2. Instruction: Integrate error handling into the code that performs batch processing of large datasets, including job failures or incomplete runs.\nInput code:\n\n\n\nOutput code:\n\n\n\n3. Instruction: Refactor any code that uses ‘exec’ or ‘eval’ for security reasons.\nInput code:\n\n\nOutput code:\n\n\n\n\nNow the script is not able to access the data in game_data.", "## Limitations and Safety Concerns\n\nBy recognizing and addressing limitations and safety concerns, we hope developers can foster responsible AI deployment and use, promoting a more ethical, equitable, and secure future for large language models in code production.\n\n1. License: You must ensure compliance with the corresponding licenses of the LLMs. 
It is important to thoroughly review the licenses and make sure that your usage is in accordance with their terms. For instance, you are not allowed to use LLaMA commercially, for it is released under a noncommercial license.\n\n2. Incomplete or Imperfect Knowledge: LLMs are trained on vast amounts of data, which may not always be up-to-date or entirely accurate. For example, the APIs of a library may change over time. Consequently, the information provided by the models could be outdated, inaccurate, or even misleading in some instances.\n\n3. Overuse and Dependency: Users might incorrectly interpret or rely too heavily on the outputs generated by large language models. It is crucial to provide proper guidance and promote an understanding of the model’s limitations, encouraging users to critically assess and verify the information or suggestions provided. Please make sure to check the generation of the models before using them.\n\nOverreliance on large language models could lead to complacency, potentially causing users to undervalue human intelligence, such as creativity and critical thinking. We encourage users to use AI as a tool to supplement, rather than replace, human input and judgment.\n\n4. Malicious Use: There is a risk that malicious actors might use the tools for nefarious purposes, such as generating malicious software. It is important to monitor the use and deployment of these models, track and report abuse, and develop countermeasures to address potential malicious activity.\n\n5. Bias and Discrimination: Language models can inherit societal biases present in their training data, possibly leading to discriminatory or biased generations. Though our dataset is not likely to contain such toxic data, they may appear in the responses because of the base LLMs.\n\nFeel free to cite our work if you find it interesting or use the data:", "## Conclusion\nThe integration of AI into code editing represents a significant milestone in the evolution of software development. By leveraging AI’s capabilities in understanding code semantics, patterns, and best practices, developers can unlock new levels of productivity, code quality, and efficiency. This project we’ve explored demonstrates the immense potential of intelligent code editing tools. As the software development landscape continues to evolve, embracing AI is poised to become a standard practice, and sets the stage for a future where developers can focus more on creativity and problem-solving, while AI handles the mundane aspects of coding." ]
[ 44, 19, 15, 348, 116, 51, 24, 159, 499, 144 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #code #arxiv-2310.20329 #region-us \n# InstructCoder (CodeInstruct): Empowering Language Models to Edit Code## Updates\n- May 23, 2023: Paper, code and data released.## Overview\nInstructCoder is the first dataset designed to adapt LLMs for general code editing. It consists of 114,239 instruction-input-output triplets and covers multiple distinct code editing scenarios, generated by ChatGPT. LLaMA-33B finetuned on InstructCoder performs on par with ChatGPT on a real-world test set derived from GitHub commits.\n\n![](URL)\n\nIn the ever-evolving world of software development, efficient and effective code editing plays a pivotal role. As the demands for faster development cycles and increased productivity continue to rise, harnessing the power of Large Language Models in code editing has emerged as a game-changer. This project aims to revolutionize code editing, empowering developers with intelligent tools and techniques.\n\nThough the community has studied extensively on coding with LLMs, our experiments demonstrate that current LLMs still struggle for code editing. To this end, we curate a code editing dataset, dubbed InstructCoder, for improving and evaluating code editing abilities of LLMs. InstructCoder is an instructional dataset containing diverse code-editing tasks, covering 20 scenarios where code editing may be performed. The dataset is released here. Our empirical experiments reveal that open-source LLMs display notable gains in code editing abilities after finetuning on InstructCoder. Some qualitative examples are provided.", "passage: ## Data Collection\nInstructCoder is systematically collected through an iterative process. Initial seed tasks are selected from GitHub commits. Inspired by Self-Instruct, we prompt ChatGPT to bootstrap new instructions. Then, plausible scenarios where the filtered instructions may be used are generated. Finally, corresponding code input and output are obtained conditioned on both the instruction and scenario. High-quality samples are manually selected and recurrently added to the task pool for further generation.## Results\nThe largest model fine-tuned, LLaMA-33B, performs on par with ChatGPT, with 79.3% accuracy on the test set. Some qualitative examples generated by our model are shown here.## Release\nWe are planning to release the following assets:\n\n- [x] Full dataset\n- [x] Source Code## Qualitative Examples\nIn this section, we showcase three qualitative examples generated by the LLaMA-33B model.\n\n1. Instruction: Transform the code into a class method called ‘calculate_distance’ that calculates the distance between two points in 3D space.\n\nInput code:\n\n\nOutput code:\n\n\n\n2. Instruction: Integrate error handling into the code that performs batch processing of large datasets, including job failures or incomplete runs.\nInput code:\n\n\n\nOutput code:\n\n\n\n3. Instruction: Refactor any code that uses ‘exec’ or ‘eval’ for security reasons.\nInput code:\n\n\nOutput code:\n\n\n\n\nNow the script is not able to access the data in game_data." ]
6f68b90b23f3f546fcb658f1701ec7822056ae3a
# Dataset Card for "voxelgym_5c_42x42_500" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Cubpaw/voxelgym_5c_42x42_500
[ "region:us" ]
2023-10-09T10:26:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "rgb_label", "dtype": "image"}, {"name": "path_label", "dtype": "image"}, {"name": "path_rgb_label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 373246.0, "num_examples": 400}, {"name": "validation", "num_bytes": 92510.0, "num_examples": 100}], "download_size": 403202, "dataset_size": 465756.0}}
2023-10-09T10:26:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "voxelgym_5c_42x42_500" More Information needed
[ "# Dataset Card for \"voxelgym_5c_42x42_500\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"voxelgym_5c_42x42_500\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"voxelgym_5c_42x42_500\"\n\nMore Information needed" ]
29a82cdd2bd868a741b56957a22d327379a2c861
# Dataset Card for "llama2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
promptora11/llama2
[ "region:us" ]
2023-10-09T10:27:02+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 338808, "num_examples": 200}], "download_size": 201257, "dataset_size": 338808}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T10:27:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "llama2" More Information needed
[ "# Dataset Card for \"llama2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"llama2\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"llama2\"\n\nMore Information needed" ]
9fa5de7809f0159950e2d3276843ee264d93993c
# Dataset Card for "donut_two_layouts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sankettgorey/donut_two_layouts
[ "region:us" ]
2023-10-09T10:41:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 511297223.058686, "num_examples": 1279}, {"name": "test", "num_bytes": 64193822.85115697, "num_examples": 160}, {"name": "valid", "num_bytes": 63774381.85115697, "num_examples": 160}], "download_size": 575142166, "dataset_size": 639265427.7609999}}
2023-10-09T10:42:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "donut_two_layouts" More Information needed
[ "# Dataset Card for \"donut_two_layouts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"donut_two_layouts\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"donut_two_layouts\"\n\nMore Information needed" ]
ca9134134a901472bfef25eae2860cd7cb340dd2
Few-shot and reference files for the LIMMITS 24 Challenge: https://sites.google.com/view/limmits24/. IndicTTS speakers are taken from https://www.iitm.ac.in/donlab/tts/detailed_statistics.php.
SYSPIN/LIMMITS24_target_speaker_fewshot_samples
[ "license:cc-by-4.0", "region:us" ]
2023-10-09T10:46:47+00:00
{"license": "cc-by-4.0"}
2024-02-09T06:05:08+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
Few shot and reference files for LIMMITS 24 Challenge - URL IndicTTS speakers are taken from URL
[]
[ "TAGS\n#license-cc-by-4.0 #region-us \n" ]
[ 15 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n" ]
d0f532ead77e2d4d7affbba97453d667f1187d13
# Dataset Card for Evaluation run of migtissera/SynthIA-7B-v1.3

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/migtissera/SynthIA-7B-v1.3
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [migtissera/SynthIA-7B-v1.3](https://huggingface.co/migtissera/SynthIA-7B-v1.3) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:

```python
from datasets import load_dataset

data = load_dataset("open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3",
                    "harness_winogrande_5",
                    split="train")
```

## Latest results

These are the [latest results from run 2023-10-28T14:24:19.449160](https://huggingface.co/datasets/open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3/blob/main/results_2023-10-28T14-24-19.449160.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the "results" configuration and in the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.34375,
        "em_stderr": 0.004864023482291936,
        "f1": 0.43760067114094225,
        "f1_stderr": 0.004666454920595155,
        "acc": 0.4821837715185681,
        "acc_stderr": 0.010982434159881403
    },
    "harness|drop|3": {
        "em": 0.34375,
        "em_stderr": 0.004864023482291936,
        "f1": 0.43760067114094225,
        "f1_stderr": 0.004666454920595155
    },
    "harness|gsm8k|5": {
        "acc": 0.17589082638362397,
        "acc_stderr": 0.010487120635539617
    },
    "harness|winogrande|5": {
        "acc": 0.7884767166535123,
        "acc_stderr": 0.011477747684223188
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
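
As a companion to the loading example above, here is a minimal sketch of inspecting one of the detail splits. It assumes only the repo and config names shown in this card; the per-task column layout is not documented here, so the code prints the schema rather than assuming field names.

```python
# Hedged sketch: inspect a details split from this evaluation run.
from datasets import load_dataset

details = load_dataset(
    "open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3",
    "harness_winogrande_5",
    split="train",  # per the card, "train" always points to the latest results
)

print(details.features)  # column layouts differ across harness tasks, so inspect first
print(details[0])        # one evaluated example from the run
```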
open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3
[ "region:us" ]
2023-10-09T10:48:41+00:00
{"pretty_name": "Evaluation run of migtissera/SynthIA-7B-v1.3", "dataset_summary": "Dataset automatically created during the evaluation run of model [migtissera/SynthIA-7B-v1.3](https://huggingface.co/migtissera/SynthIA-7B-v1.3) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-28T14:24:19.449160](https://huggingface.co/datasets/open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3/blob/main/results_2023-10-28T14-24-19.449160.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.34375,\n \"em_stderr\": 0.004864023482291936,\n \"f1\": 0.43760067114094225,\n \"f1_stderr\": 0.004666454920595155,\n \"acc\": 0.4821837715185681,\n \"acc_stderr\": 0.010982434159881403\n },\n \"harness|drop|3\": {\n \"em\": 0.34375,\n \"em_stderr\": 0.004864023482291936,\n \"f1\": 0.43760067114094225,\n \"f1_stderr\": 0.004666454920595155\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.17589082638362397,\n \"acc_stderr\": 0.010487120635539617\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7884767166535123,\n \"acc_stderr\": 0.011477747684223188\n }\n}\n```", "repo_url": "https://huggingface.co/migtissera/SynthIA-7B-v1.3", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|arc:challenge|25_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|arc:challenge|25_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_28T14_24_19.449160", "path": ["**/details_harness|drop|3_2023-10-28T14-24-19.449160.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-28T14-24-19.449160.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_28T14_24_19.449160", "path": ["**/details_harness|gsm8k|5_2023-10-28T14-24-19.449160.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-28T14-24-19.449160.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hellaswag|10_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": 
["**/details_harness|hellaswag|10_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-48-18.823660.parquet", 
"**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T11-48-18.823660.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-58-55.532772.parquet", 
"**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-58-55.532772.parquet", 
"**/details_harness|hendrycksTest-virology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-58-55.532772.parquet", 
"**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T11-58-55.532772.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": 
["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": 
"2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": 
["**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": 
"2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": 
"2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T11-58-55.532772.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T11-58-55.532772.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_28T14_24_19.449160", "path": ["**/details_harness|winogrande|5_2023-10-28T14-24-19.449160.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-28T14-24-19.449160.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T11_48_18.823660", "path": ["results_2023-10-09T11-48-18.823660.parquet"]}, {"split": "2023_10_09T11_58_55.532772", "path": ["results_2023-10-09T11-58-55.532772.parquet"]}, {"split": "2023_10_28T14_24_19.449160", "path": ["results_2023-10-28T14-24-19.449160.parquet"]}, {"split": "latest", "path": ["results_2023-10-28T14-24-19.449160.parquet"]}]}]}
2023-10-28T13:24:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of migtissera/SynthIA-7B-v1.3 ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model migtissera/SynthIA-7B-v1.3 on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-28T14:24:19.449160 (note that there might be results for other tasks in this repo if successive evals didn't cover the same tasks. You can find each in the results and in the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
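The loading snippet referenced just above ("you can for instance do the following:") was stripped when this card text was flattened. A minimal sketch of what it would look like, assuming the Hugging Face `datasets` library; the repo name is inferred from the `details_<org>__<model>` naming convention used by the other records in this dump, and the `harness_winogrande_5` config with its `latest` split appears in this record's config list above:

```python
from datasets import load_dataset

# Repo name is inferred from the "details_<org>__<model>" convention
# (an assumption, not stated verbatim in this record); the config name
# and the "latest" split come from the config list in the metadata above.
data = load_dataset(
    "open-llm-leaderboard/details_migtissera__SynthIA-7B-v1.3",
    "harness_winogrande_5",
    split="latest",
)
print(data)  # a datasets.Dataset with one row per evaluated example
```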
[ "# Dataset Card for Evaluation run of migtissera/SynthIA-7B-v1.3", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model migtissera/SynthIA-7B-v1.3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-28T14:24:19.449160(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of migtissera/SynthIA-7B-v1.3", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model migtissera/SynthIA-7B-v1.3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-28T14:24:19.449160(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 23, 31, 171, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of migtissera/SynthIA-7B-v1.3## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model migtissera/SynthIA-7B-v1.3 on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-28T14:24:19.449160(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
55d232f57667abeaf7fa88f5f953e55f56f96de5
# Dataset Card for Evaluation run of ehartford/dolphin-2.0-mistral-7b ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/ehartford/dolphin-2.0-mistral-7b - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [ehartford/dolphin-2.0-mistral-7b](https://huggingface.co/ehartford/dolphin-2.0-mistral-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-29T11:13:09.242733](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b/blob/main/results_2023-10-29T11-13-09.242733.json) (note that there might be results for other tasks in this repo if successive evals didn't cover the same tasks. You can find each in the results and in the "latest" split for each eval): ```python { "all": { "em": 0.32843959731543626, "em_stderr": 0.0048096109452043685, "f1": 0.3948563338926188, "f1_stderr": 0.004687030417639075, "acc": 0.47012697069663045, "acc_stderr": 0.011418752673563709 }, "harness|drop|3": { "em": 0.32843959731543626, "em_stderr": 0.0048096109452043685, "f1": 0.3948563338926188, "f1_stderr": 0.004687030417639075 }, "harness|gsm8k|5": { "acc": 0.1865049279757392, "acc_stderr": 0.010729140039689892 }, "harness|winogrande|5": { "acc": 0.7537490134175217, "acc_stderr": 0.012108365307437524 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
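Building on the snippet in the card above, here is a hedged sketch of inspecting the per-example records once loaded — not part of the original card. It assumes only the `datasets` library (with `pandas` installed, which `to_pandas()` requires); the `harness_gsm8k_5` config and its `latest` split are taken verbatim from this record's metadata below:

```python
from datasets import load_dataset

# Load the per-example GSM8K details from the most recent run; the
# "latest" split name comes from this record's config listing.
details = load_dataset(
    "open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b",
    "harness_gsm8k_5",
    split="latest",
)

df = details.to_pandas()      # one row per evaluated example
print(df.columns.tolist())    # fields available for error analysis
print(len(df), "examples")    # number of evaluated GSM8K examples
```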
open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b
[ "region:us" ]
2023-10-09T11:06:50+00:00
{"pretty_name": "Evaluation run of ehartford/dolphin-2.0-mistral-7b", "dataset_summary": "Dataset automatically created during the evaluation run of model [ehartford/dolphin-2.0-mistral-7b](https://huggingface.co/ehartford/dolphin-2.0-mistral-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-29T11:13:09.242733](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b/blob/main/results_2023-10-29T11-13-09.242733.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.32843959731543626,\n \"em_stderr\": 0.0048096109452043685,\n \"f1\": 0.3948563338926188,\n \"f1_stderr\": 0.004687030417639075,\n \"acc\": 0.47012697069663045,\n \"acc_stderr\": 0.011418752673563709\n },\n \"harness|drop|3\": {\n \"em\": 0.32843959731543626,\n \"em_stderr\": 0.0048096109452043685,\n \"f1\": 0.3948563338926188,\n \"f1_stderr\": 0.004687030417639075\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.1865049279757392,\n \"acc_stderr\": 0.010729140039689892\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7537490134175217,\n \"acc_stderr\": 0.012108365307437524\n }\n}\n```", "repo_url": "https://huggingface.co/ehartford/dolphin-2.0-mistral-7b", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_29T11_13_09.242733", "path": ["**/details_harness|drop|3_2023-10-29T11-13-09.242733.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-29T11-13-09.242733.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_29T11_13_09.242733", "path": ["**/details_harness|gsm8k|5_2023-10-29T11-13-09.242733.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-29T11-13-09.242733.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-06-26.268228.parquet", 
"**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-06-26.268228.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-06-26.268228.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", 
"data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", 
"path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-06-26.268228.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-06-26.268228.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_29T11_13_09.242733", "path": ["**/details_harness|winogrande|5_2023-10-29T11-13-09.242733.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-29T11-13-09.242733.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_06_26.268228", "path": ["results_2023-10-09T12-06-26.268228.parquet"]}, {"split": "2023_10_29T11_13_09.242733", "path": ["results_2023-10-29T11-13-09.242733.parquet"]}, {"split": "latest", "path": ["results_2023-10-29T11-13-09.242733.parquet"]}]}]}
2023-10-29T11:13:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of ehartford/dolphin-2.0-mistral-7b ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model ehartford/dolphin-2.0-mistral-7b on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-29T11:13:09.242733 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
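(The load snippet itself is stripped from this flattened copy of the card; a minimal reconstruction is sketched below. The repository id is an assumption inferred from the card title and the leaderboard's `details_<org>__<model>` naming pattern, and `harness_winogrande_5` is one of the config names listed in this record's metadata.)

```python
from datasets import load_dataset

# Load the per-sample details for one evaluated task (one config) of this model.
# The repository id is assumed from the leaderboard naming convention;
# "harness_winogrande_5" is a config name defined in the metadata above.
data = load_dataset("open-llm-leaderboard/details_ehartford__dolphin-2.0-mistral-7b",
                    "harness_winogrande_5",
                    split="train")
```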
[ "# Dataset Card for Evaluation run of ehartford/dolphin-2.0-mistral-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/dolphin-2.0-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T11:13:09.242733(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of ehartford/dolphin-2.0-mistral-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/dolphin-2.0-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T11:13:09.242733(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 23, 31, 171, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of ehartford/dolphin-2.0-mistral-7b## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/dolphin-2.0-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-29T11:13:09.242733(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
375e4dc5c493675daa4089c4d7a947410064a8c3
# Dataset Card for Evaluation run of ehartford/samantha-mistral-7b ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/ehartford/samantha-mistral-7b - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [ehartford/samantha-mistral-7b](https://huggingface.co/ehartford/samantha-mistral-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_ehartford__samantha-mistral-7b", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-29T01:10:37.829717](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__samantha-mistral-7b/blob/main/results_2023-10-29T01-10-37.829717.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.056732382550335574, "em_stderr": 0.0023690412638350568, "f1": 0.11221791107382512, "f1_stderr": 0.0025837884585332253, "acc": 0.4639627375502118, "acc_stderr": 0.010980763759790235 }, "harness|drop|3": { "em": 0.056732382550335574, "em_stderr": 0.0023690412638350568, "f1": 0.11221791107382512, "f1_stderr": 0.0025837884585332253 }, "harness|gsm8k|5": { "acc": 0.1599696739954511, "acc_stderr": 0.010097377827752538 }, "harness|winogrande|5": { "acc": 0.7679558011049724, "acc_stderr": 0.011864149691827934 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
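Beyond per-task details, the aggregated metrics can be loaded the same way; a minimal sketch using the "results" configuration and its "latest" split, both defined in this repository's metadata:

```python
from datasets import load_dataset

# "results" stores one row of aggregated metrics per evaluation run;
# the "latest" split points at the most recent run.
results = load_dataset("open-llm-leaderboard/details_ehartford__samantha-mistral-7b",
                       "results",
                       split="latest")
print(results[0])
```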
open-llm-leaderboard/details_ehartford__samantha-mistral-7b
[ "region:us" ]
2023-10-09T11:11:42+00:00
{"pretty_name": "Evaluation run of ehartford/samantha-mistral-7b", "dataset_summary": "Dataset automatically created during the evaluation run of model [ehartford/samantha-mistral-7b](https://huggingface.co/ehartford/samantha-mistral-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ehartford__samantha-mistral-7b\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-29T01:10:37.829717](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__samantha-mistral-7b/blob/main/results_2023-10-29T01-10-37.829717.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.056732382550335574,\n \"em_stderr\": 0.0023690412638350568,\n \"f1\": 0.11221791107382512,\n \"f1_stderr\": 0.0025837884585332253,\n \"acc\": 0.4639627375502118,\n \"acc_stderr\": 0.010980763759790235\n },\n \"harness|drop|3\": {\n \"em\": 0.056732382550335574,\n \"em_stderr\": 0.0023690412638350568,\n \"f1\": 0.11221791107382512,\n \"f1_stderr\": 0.0025837884585332253\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.1599696739954511,\n \"acc_stderr\": 0.010097377827752538\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7679558011049724,\n \"acc_stderr\": 0.011864149691827934\n }\n}\n```", "repo_url": "https://huggingface.co/ehartford/samantha-mistral-7b", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_29T01_10_37.829717", "path": ["**/details_harness|drop|3_2023-10-29T01-10-37.829717.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-29T01-10-37.829717.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_29T01_10_37.829717", "path": ["**/details_harness|gsm8k|5_2023-10-29T01-10-37.829717.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-29T01-10-37.829717.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-11-18.939016.parquet", 
"**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-11-18.939016.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-11-18.939016.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", 
"data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", 
"path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-11-18.939016.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-11-18.939016.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_29T01_10_37.829717", "path": ["**/details_harness|winogrande|5_2023-10-29T01-10-37.829717.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-29T01-10-37.829717.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_11_18.939016", "path": ["results_2023-10-09T12-11-18.939016.parquet"]}, {"split": "2023_10_29T01_10_37.829717", "path": ["results_2023-10-29T01-10-37.829717.parquet"]}, {"split": "latest", "path": ["results_2023-10-29T01-10-37.829717.parquet"]}]}]}
2023-10-29T00:10:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of ehartford/samantha-mistral-7b ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model ehartford/samantha-mistral-7b on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-29T01:10:37.829717 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
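(The code block stripped from this flattened copy is, per the full card and metadata above:)

```python
from datasets import load_dataset

# Per-sample details for the winogrande task, as given in the original card.
data = load_dataset("open-llm-leaderboard/details_ehartford__samantha-mistral-7b",
                    "harness_winogrande_5",
                    split="train")
```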
[ "# Dataset Card for Evaluation run of ehartford/samantha-mistral-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T01:10:37.829717(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of ehartford/samantha-mistral-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T01:10:37.829717(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 20, 31, 168, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of ehartford/samantha-mistral-7b## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-29T01:10:37.829717(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
364ed204db45a7738cec29b277d971c43d344718
# Dataset Card for Evaluation run of ehartford/samantha-mistral-instruct-7b

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/ehartford/samantha-mistral-instruct-7b
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [ehartford/samantha-mistral-instruct-7b](https://huggingface.co/ehartford/samantha-mistral-instruct-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-10-29T11:08:05.162648](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b/blob/main/results_2023-10-29T11-08-05.162648.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.31291946308724833,
        "em_stderr": 0.004748536304260034,
        "f1": 0.36725566275167865,
        "f1_stderr": 0.0046625848085346845,
        "acc": 0.4062203613868821,
        "acc_stderr": 0.010696600366483247
    },
    "harness|drop|3": {
        "em": 0.31291946308724833,
        "em_stderr": 0.004748536304260034,
        "f1": 0.36725566275167865,
        "f1_stderr": 0.0046625848085346845
    },
    "harness|gsm8k|5": {
        "acc": 0.10841546626231995,
        "acc_stderr": 0.008563852506627485
    },
    "harness|winogrande|5": {
        "acc": 0.7040252565114443,
        "acc_stderr": 0.012829348226339011
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
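The example above loads a single task configuration; the aggregated metrics live in the "results" configuration. A minimal sketch, assuming only the config and split names declared in this card's own metadata (the "results" config with a "latest" split):

```python
from datasets import load_dataset

# The "results" config stores the aggregated scores across tasks;
# its "latest" split always points to the most recent evaluation run.
results = load_dataset(
    "open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b",
    "results",
    split="latest",
)
print(results[0])  # aggregated metrics for the latest run
```

Older runs remain addressable through their timestamped splits (for example, the `2023_10_29T11_08_05.162648` split listed in the metadata below).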
open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b
[ "region:us" ]
2023-10-09T11:17:49+00:00
{"pretty_name": "Evaluation run of ehartford/samantha-mistral-instruct-7b", "dataset_summary": "Dataset automatically created during the evaluation run of model [ehartford/samantha-mistral-instruct-7b](https://huggingface.co/ehartford/samantha-mistral-instruct-7b) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-29T11:08:05.162648](https://huggingface.co/datasets/open-llm-leaderboard/details_ehartford__samantha-mistral-instruct-7b/blob/main/results_2023-10-29T11-08-05.162648.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.31291946308724833,\n \"em_stderr\": 0.004748536304260034,\n \"f1\": 0.36725566275167865,\n \"f1_stderr\": 0.0046625848085346845,\n \"acc\": 0.4062203613868821,\n \"acc_stderr\": 0.010696600366483247\n },\n \"harness|drop|3\": {\n \"em\": 0.31291946308724833,\n \"em_stderr\": 0.004748536304260034,\n \"f1\": 0.36725566275167865,\n \"f1_stderr\": 0.0046625848085346845\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.10841546626231995,\n \"acc_stderr\": 0.008563852506627485\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7040252565114443,\n \"acc_stderr\": 0.012829348226339011\n }\n}\n```", "repo_url": "https://huggingface.co/ehartford/samantha-mistral-instruct-7b", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_29T11_08_05.162648", "path": ["**/details_harness|drop|3_2023-10-29T11-08-05.162648.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-29T11-08-05.162648.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_29T11_08_05.162648", "path": ["**/details_harness|gsm8k|5_2023-10-29T11-08-05.162648.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-29T11-08-05.162648.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hellaswag|10_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-17-25.772796.parquet", 
"**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-17-25.772796.parquet", 
"**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-17-25.772796.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-17-25.772796.parquet", 
"**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": 
"2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-17-25.772796.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-17-25.772796.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_29T11_08_05.162648", "path": ["**/details_harness|winogrande|5_2023-10-29T11-08-05.162648.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-29T11-08-05.162648.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_17_25.772796", "path": ["results_2023-10-09T12-17-25.772796.parquet"]}, {"split": "2023_10_29T11_08_05.162648", "path": ["results_2023-10-29T11-08-05.162648.parquet"]}, {"split": "latest", "path": ["results_2023-10-29T11-08-05.162648.parquet"]}]}]}
2023-10-29T11:08:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of ehartford/samantha-mistral-instruct-7b ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model ehartford/samantha-mistral-instruct-7b on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-29T11:08:05.162648 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of ehartford/samantha-mistral-instruct-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-instruct-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T11:08:05.162648(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of ehartford/samantha-mistral-instruct-7b", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-instruct-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T11:08:05.162648(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 23, 31, 171, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of ehartford/samantha-mistral-instruct-7b## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model ehartford/samantha-mistral-instruct-7b on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-29T11:08:05.162648(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
3482635a116d28cdac71a1304cee24fdd7296b3e
# Dataset Card for "cqudubstack-android" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-android
[ "region:us" ]
2023-10-09T11:19:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 47953, "num_examples": 699}, {"name": "corpus", "num_bytes": 12840959, "num_examples": 22998}], "download_size": 7657118, "dataset_size": 12888912}}
2023-10-09T11:19:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-android" More Information needed
[ "# Dataset Card for \"cqudubstack-android\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-android\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-android\"\n\nMore Information needed" ]
83418146d33859c8e6ce923c3db23c29d9748516
# Dataset Card for "cqadubstack-android-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-android-qrels
[ "region:us" ]
2023-10-09T11:19:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 43411, "num_examples": 1696}], "download_size": 19993, "dataset_size": 43411}}
2023-10-09T11:19:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-android-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-android-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-android-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-android-qrels\"\n\nMore Information needed" ]
361f7cf5ac5d740b98cb9195d6379bc6a5201a1f
# Dataset Card for "cqudubstack-gaming" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-gaming
[ "region:us" ]
2023-10-09T11:19:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 105494, "num_examples": 1595}, {"name": "corpus", "num_bytes": 20666596, "num_examples": 45301}], "download_size": 12946080, "dataset_size": 20772090}}
2023-10-09T11:19:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-gaming" More Information needed
[ "# Dataset Card for \"cqudubstack-gaming\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-gaming\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-gaming\"\n\nMore Information needed" ]
e7249704b286d80b1390468dd6722c8de9d5408e
# Dataset Card for "cqadubstack-gaming-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-gaming-qrels
[ "region:us" ]
2023-10-09T11:19:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 60520, "num_examples": 2263}], "download_size": 32524, "dataset_size": 60520}}
2023-10-09T11:19:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-gaming-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-gaming-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-gaming-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-gaming-qrels\"\n\nMore Information needed" ]
b52c40a31da6e1ca964e765c8eef631b03db77ce
# Dataset Card for "cqudubstack-mathematica" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-mathematica
[ "region:us" ]
2023-10-09T11:19:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 52792, "num_examples": 804}, {"name": "corpus", "num_bytes": 18735825, "num_examples": 16705}], "download_size": 10393860, "dataset_size": 18788617}}
2023-10-09T11:20:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-mathematica" More Information needed
[ "# Dataset Card for \"cqudubstack-mathematica\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-mathematica\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-mathematica\"\n\nMore Information needed" ]
93710db9e824e83f8d7d899c876c3e765b0b498e
# Dataset Card for "cqadubstack-mathematica-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-mathematica-qrels
[ "region:us" ]
2023-10-09T11:20:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 34691, "num_examples": 1358}], "download_size": 18181, "dataset_size": 34691}}
2023-10-09T11:20:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-mathematica-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-mathematica-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-mathematica-qrels\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-mathematica-qrels\"\n\nMore Information needed" ]
0aa6fe324f9a663040922fe280850c62d1c60ac8
# Dataset Card for "cqudubstack-programmers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-programmers
[ "region:us" ]
2023-10-09T11:20:06+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 63785, "num_examples": 876}, {"name": "corpus", "num_bytes": 32727262, "num_examples": 32176}], "download_size": 19360000, "dataset_size": 32791047}}
2023-10-09T11:20:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-programmers" More Information needed
[ "# Dataset Card for \"cqudubstack-programmers\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-programmers\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-programmers\"\n\nMore Information needed" ]
1720dd25fe338e0f91613217556e9c103efb4ceb
# Dataset Card for "cqadubstack-programmers-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-programmers-qrels
[ "region:us" ]
2023-10-09T11:20:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 45452, "num_examples": 1675}], "download_size": 22632, "dataset_size": 45452}}
2023-10-09T11:20:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-programmers-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-programmers-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-programmers-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-programmers-qrels\"\n\nMore Information needed" ]
38c0407f5ed67c766bcd5744a7b07fde66caa85e
# Dataset Card for "cqudubstack-tex" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-tex
[ "region:us" ]
2023-10-09T11:21:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 186934, "num_examples": 2906}, {"name": "corpus", "num_bytes": 86600423, "num_examples": 68184}], "download_size": 43424126, "dataset_size": 86787357}}
2023-10-09T11:21:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-tex" More Information needed
[ "# Dataset Card for \"cqudubstack-tex\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-tex\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-tex\"\n\nMore Information needed" ]
c7b87d8621e6b7a7d5e4d44b82241cdaa884a60a
# Dataset Card for "cqadubstack-tex-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-tex-qrels
[ "region:us" ]
2023-10-09T11:21:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 137572, "num_examples": 5154}], "download_size": 67107, "dataset_size": 137572}}
2023-10-09T11:21:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-tex-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-tex-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-tex-qrels\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-tex-qrels\"\n\nMore Information needed" ]
d130edbb324724d1a856ab189a2a08b7fe43c3bb
# Dataset Card for "cqudubstack-webmasters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-webmasters
[ "region:us" ]
2023-10-09T11:21:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 34792, "num_examples": 506}, {"name": "corpus", "num_bytes": 11659413, "num_examples": 17405}], "download_size": 6885106, "dataset_size": 11694205}}
2023-10-09T11:21:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-webmasters" More Information needed
[ "# Dataset Card for \"cqudubstack-webmasters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-webmasters\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-webmasters\"\n\nMore Information needed" ]
df94bd8b7716143735b0db3dda5ba6b8893ad2a1
# Dataset Card for "cqadubstack-webmasters-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-webmasters-qrels
[ "region:us" ]
2023-10-09T11:21:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 35771, "num_examples": 1395}], "download_size": 16248, "dataset_size": 35771}}
2023-10-09T11:21:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-webmasters-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-webmasters-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-webmasters-qrels\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-webmasters-qrels\"\n\nMore Information needed" ]
725bc96e1cece57602360642fd36e5e4bb22e359
# Dataset Card for "cqudubstack-english" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-english
[ "region:us" ]
2023-10-09T11:21:22+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 103588, "num_examples": 1570}, {"name": "corpus", "num_bytes": 18199570, "num_examples": 40221}], "download_size": 11382247, "dataset_size": 18303158}}
2023-10-09T11:21:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-english" More Information needed
[ "# Dataset Card for \"cqudubstack-english\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-english\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-english\"\n\nMore Information needed" ]
01c5e83d2c8e5364e1bd801482b98cce07375a81
# Dataset Card for "cqadubstack-english-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-english-qrels
[ "region:us" ]
2023-10-09T11:21:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 100171, "num_examples": 3765}], "download_size": 45031, "dataset_size": 100171}}
2023-10-09T11:21:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-english-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-english-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-english-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-english-qrels\"\n\nMore Information needed" ]
f375b2569b78fa235b989c98944c062e2e83afa2
# Dataset Card for "cqudubstack-gis" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-gis
[ "region:us" ]
2023-10-09T11:21:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 61244, "num_examples": 885}, {"name": "corpus", "num_bytes": 36704924, "num_examples": 37637}], "download_size": 20083359, "dataset_size": 36766168}}
2023-10-09T11:21:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-gis" More Information needed
[ "# Dataset Card for \"cqudubstack-gis\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-gis\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-gis\"\n\nMore Information needed" ]
e8fb795b5e312ced8602f04049854927b06f79fb
# Dataset Card for "cqadubstack-gis-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-gis-qrels
[ "region:us" ]
2023-10-09T11:21:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 28952, "num_examples": 1114}], "download_size": 17234, "dataset_size": 28952}}
2023-10-09T11:21:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-gis-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-gis-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-gis-qrels\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-gis-qrels\"\n\nMore Information needed" ]
be05f6953709b971c93f45a820ffc8a2f31deeab
# Dataset Card for "cqudubstack-physics" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-physics
[ "region:us" ]
2023-10-09T11:21:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 73255, "num_examples": 1039}, {"name": "corpus", "num_bytes": 29949928, "num_examples": 38316}], "download_size": 17827262, "dataset_size": 30023183}}
2023-10-09T11:21:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-physics" More Information needed
[ "# Dataset Card for \"cqudubstack-physics\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-physics\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-physics\"\n\nMore Information needed" ]
7c4d1e3f317634268ab6adcc3082a76a44055105
# Dataset Card for "cqadubstack-physics-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-physics-qrels
[ "region:us" ]
2023-10-09T11:21:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 50809, "num_examples": 1933}], "download_size": 25022, "dataset_size": 50809}}
2023-10-09T11:21:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-physics-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-physics-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-physics-qrels\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-physics-qrels\"\n\nMore Information needed" ]
cf808c33cf7e9825e3687dbb1d30861915d0b91b
# Dataset Card for "cqudubstack-stats" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-stats
[ "region:us" ]
2023-10-09T11:21:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 47795, "num_examples": 652}, {"name": "corpus", "num_bytes": 42923933, "num_examples": 42269}], "download_size": 24679799, "dataset_size": 42971728}}
2023-10-09T11:21:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-stats" More Information needed
[ "# Dataset Card for \"cqudubstack-stats\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-stats\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-stats\"\n\nMore Information needed" ]
7d9d608fcc84133b2dea7b3f510de1e2944b6538
# Dataset Card for "cqadubstack-stats-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-stats-qrels
[ "region:us" ]
2023-10-09T11:21:59+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 23665, "num_examples": 913}], "download_size": 13316, "dataset_size": 23665}}
2023-10-09T11:22:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-stats-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-stats-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-stats-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-stats-qrels\"\n\nMore Information needed" ]
fef303d0331188489e307fdc424bb4e7b49fd64f
# Dataset Card for "cqudubstack-unix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-unix
[ "region:us" ]
2023-10-09T11:22:06+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 72357, "num_examples": 1072}, {"name": "corpus", "num_bytes": 46102756, "num_examples": 47382}], "download_size": 24571026, "dataset_size": 46175113}}
2023-10-09T11:22:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-unix" More Information needed
[ "# Dataset Card for \"cqudubstack-unix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-unix\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-unix\"\n\nMore Information needed" ]
c1853b65810ef16bfc4a5ec7b8009a205902821f
# Dataset Card for "cqadubstack-unix-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-unix-qrels
[ "region:us" ]
2023-10-09T11:22:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 44636, "num_examples": 1693}], "download_size": 23577, "dataset_size": 44636}}
2023-10-09T11:22:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-unix-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-unix-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-unix-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-unix-qrels\"\n\nMore Information needed" ]
5b1565d5657bd5aafaf15150d70c43c85a6089cc
# Dataset Card for "cqudubstack-wordpress" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudubstack-wordpress
[ "region:us" ]
2023-10-09T11:22:15+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 35736, "num_examples": 541}, {"name": "corpus", "num_bytes": 53026140, "num_examples": 48605}], "download_size": 26551471, "dataset_size": 53061876}}
2023-10-09T11:22:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudubstack-wordpress" More Information needed
[ "# Dataset Card for \"cqudubstack-wordpress\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudubstack-wordpress\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudubstack-wordpress\"\n\nMore Information needed" ]
f6c95afca1c42b41f214db61693e96a0aa9c8dbf
# Dataset Card for "cqadubstack-wordpress-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadubstack-wordpress-qrels
[ "region:us" ]
2023-10-09T11:22:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 19885, "num_examples": 744}], "download_size": 11490, "dataset_size": 19885}}
2023-10-09T11:22:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadubstack-wordpress-qrels" More Information needed
[ "# Dataset Card for \"cqadubstack-wordpress-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadubstack-wordpress-qrels\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadubstack-wordpress-qrels\"\n\nMore Information needed" ]
a400b3b6dbb5caa76f0068ef8aa817785ee0ee84
Fix for [TheFusion21/PokemonCards](https://huggingface.co/datasets/TheFusion21/PokemonCards), in which images with broken links have been discarded. Tested while fine-tuning [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b) with LoRA using my custom Git repository: https://github.com/tonywu71/idefics-project.
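A minimal usage sketch (illustrative only: it assumes the single `train` split declared in the config below, and that `image_url` still resolves, since images are stored as URLs rather than binary blobs):

```python
from io import BytesIO

import requests
from datasets import load_dataset
from PIL import Image

# Load the fixed dataset; only a "train" split is published.
ds = load_dataset("tonywu71/PokemonCards_fixed", split="train")

sample = ds[0]
print(sample["name"], sample["hp"], sample["set_name"])

# Fetch one card image on demand from its URL.
response = requests.get(sample["image_url"], timeout=10)
response.raise_for_status()
image = Image.open(BytesIO(response.content))
print(image.size)
```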
tonywu71/PokemonCards_fixed
[ "task_categories:question-answering", "language:en", "license:mit", "region:us" ]
2023-10-09T11:23:59+00:00
{"language": ["en"], "license": "mit", "task_categories": ["question-answering"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image_url", "dtype": "string"}, {"name": "caption", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "hp", "dtype": "int64"}, {"name": "set_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9474973.87624629, "num_examples": 13088}], "download_size": 3028812, "dataset_size": 9474973.87624629}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-11T07:19:53+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #language-English #license-mit #region-us
Fix for TheFusion21/PokemonCards, in which images with broken links have been discarded. Tested while fine-tuning HuggingFaceM4/idefics-9b with LoRA using my custom Git repository: URL
[]
[ "TAGS\n#task_categories-question-answering #language-English #license-mit #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#task_categories-question-answering #language-English #license-mit #region-us \n" ]
0247f48f0c8e8132b346950fafee690d2737e935
# Dataset Card for Evaluation run of Open-Orca/Mistral-7B-OpenOrca ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [Open-Orca/Mistral-7B-OpenOrca](https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-29T06:22:53.674218](https://huggingface.co/datasets/open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca/blob/main/results_2023-10-29T06-22-53.674218.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.13716442953020133, "em_stderr": 0.003523095554552689, "f1": 0.20527894295301938, "f1_stderr": 0.00363436386580985, "acc": 0.48841023640281406, "acc_stderr": 0.011348185919594158 }, "harness|drop|3": { "em": 0.13716442953020133, "em_stderr": 0.003523095554552689, "f1": 0.20527894295301938, "f1_stderr": 0.00363436386580985 }, "harness|gsm8k|5": { "acc": 0.19939347990902198, "acc_stderr": 0.011005438029475652 }, "harness|winogrande|5": { "acc": 0.7774269928966061, "acc_stderr": 0.011690933809712666 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
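The snippet in the card loads one per-task config; as a complementary, hedged sketch (it assumes the "results" config and the "latest" split named in the config list below), the aggregated metrics can be pulled the same way:

```python
from datasets import load_dataset

# "results" holds the aggregated metrics; the "latest" split points at the newest run.
results = load_dataset(
    "open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca",
    "results",
    split="latest",
)
print(results[0])
```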
open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca
[ "region:us" ]
2023-10-09T11:29:01+00:00
{"pretty_name": "Evaluation run of Open-Orca/Mistral-7B-OpenOrca", "dataset_summary": "Dataset automatically created during the evaluation run of model [Open-Orca/Mistral-7B-OpenOrca](https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-29T06:22:53.674218](https://huggingface.co/datasets/open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca/blob/main/results_2023-10-29T06-22-53.674218.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.13716442953020133,\n \"em_stderr\": 0.003523095554552689,\n \"f1\": 0.20527894295301938,\n \"f1_stderr\": 0.00363436386580985,\n \"acc\": 0.48841023640281406,\n \"acc_stderr\": 0.011348185919594158\n },\n \"harness|drop|3\": {\n \"em\": 0.13716442953020133,\n \"em_stderr\": 0.003523095554552689,\n \"f1\": 0.20527894295301938,\n \"f1_stderr\": 0.00363436386580985\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.19939347990902198,\n \"acc_stderr\": 0.011005438029475652\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7774269928966061,\n \"acc_stderr\": 0.011690933809712666\n }\n}\n```", "repo_url": "https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_29T06_22_53.674218", "path": ["**/details_harness|drop|3_2023-10-29T06-22-53.674218.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-29T06-22-53.674218.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_29T06_22_53.674218", "path": ["**/details_harness|gsm8k|5_2023-10-29T06-22-53.674218.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-29T06-22-53.674218.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", 
"data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-28-38.184371.parquet", 
"**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-28-38.184371.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-28-38.184371.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", 
"data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", 
"path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-28-38.184371.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-28-38.184371.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_29T06_22_53.674218", "path": ["**/details_harness|winogrande|5_2023-10-29T06-22-53.674218.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-29T06-22-53.674218.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_28_38.184371", "path": ["results_2023-10-09T12-28-38.184371.parquet"]}, {"split": "2023_10_29T06_22_53.674218", "path": ["results_2023-10-29T06-22-53.674218.parquet"]}, {"split": "latest", "path": ["results_2023-10-29T06-22-53.674218.parquet"]}]}]}
2023-10-29T06:23:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Open-Orca/Mistral-7B-OpenOrca ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Open-Orca/Mistral-7B-OpenOrca on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-29T06:22:53.674218 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
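The loading snippet referenced just above was stripped from this plain-text rendering of the card. As a minimal sketch, assuming the details repository follows the leaderboard's usual `details_<org>__<model>` naming convention (the exact repo id is an inference here, not stated in this rendering):

```python
from datasets import load_dataset

# Repo id inferred from the leaderboard's "details_<org>__<model>" naming
# convention (the PulsarAI/Nebula-7B card below shows a confirmed example);
# treat it as an assumption rather than a documented identifier.
data = load_dataset(
    "open-llm-leaderboard/details_Open-Orca__Mistral-7B-OpenOrca",
    "harness_winogrande_5",  # one of the 64 per-task configurations
    split="train",
)
```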
[ "# Dataset Card for Evaluation run of Open-Orca/Mistral-7B-OpenOrca", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Open-Orca/Mistral-7B-OpenOrca on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T06:22:53.674218(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Open-Orca/Mistral-7B-OpenOrca", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Open-Orca/Mistral-7B-OpenOrca on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-29T06:22:53.674218(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 22, 31, 170, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Open-Orca/Mistral-7B-OpenOrca## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Open-Orca/Mistral-7B-OpenOrca on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-29T06:22:53.674218(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
efceb9305d941a2eff831b3013deb7ececd28f44
# Dataset Card for Evaluation run of PulsarAI/Nebula-7B

## Dataset Description

- **Homepage:**
- **Repository:** https://huggingface.co/PulsarAI/Nebula-7B
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [PulsarAI/Nebula-7B](https://huggingface.co/PulsarAI/Nebula-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 64 configurations, each corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:

```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_PulsarAI__Nebula-7B",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-10-23T05:54:57.990759](https://huggingface.co/datasets/open-llm-leaderboard/details_PulsarAI__Nebula-7B/blob/main/results_2023-10-23T05-54-57.990759.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.3613674496644295,
        "em_stderr": 0.004919712134554973,
        "f1": 0.4096088506711411,
        "f1_stderr": 0.00477602953566436,
        "acc": 0.4563034467407025,
        "acc_stderr": 0.01086566601540176
    },
    "harness|drop|3": {
        "em": 0.3613674496644295,
        "em_stderr": 0.004919712134554973,
        "f1": 0.4096088506711411,
        "f1_stderr": 0.00477602953566436
    },
    "harness|gsm8k|5": {
        "acc": 0.14859742228961334,
        "acc_stderr": 0.009797503180527892
    },
    "harness|winogrande|5": {
        "acc": 0.7640094711917916,
        "acc_stderr": 0.011933828850275625
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
open-llm-leaderboard/details_PulsarAI__Nebula-7B
[ "region:us" ]
2023-10-09T11:30:01+00:00
{"pretty_name": "Evaluation run of PulsarAI/Nebula-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [PulsarAI/Nebula-7B](https://huggingface.co/PulsarAI/Nebula-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_PulsarAI__Nebula-7B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-23T05:54:57.990759](https://huggingface.co/datasets/open-llm-leaderboard/details_PulsarAI__Nebula-7B/blob/main/results_2023-10-23T05-54-57.990759.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.3613674496644295,\n \"em_stderr\": 0.004919712134554973,\n \"f1\": 0.4096088506711411,\n \"f1_stderr\": 0.00477602953566436,\n \"acc\": 0.4563034467407025,\n \"acc_stderr\": 0.01086566601540176\n },\n \"harness|drop|3\": {\n \"em\": 0.3613674496644295,\n \"em_stderr\": 0.004919712134554973,\n \"f1\": 0.4096088506711411,\n \"f1_stderr\": 0.00477602953566436\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.14859742228961334,\n \"acc_stderr\": 0.009797503180527892\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7640094711917916,\n \"acc_stderr\": 0.011933828850275625\n }\n}\n```", "repo_url": "https://huggingface.co/PulsarAI/Nebula-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_23T05_54_57.990759", "path": ["**/details_harness|drop|3_2023-10-23T05-54-57.990759.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-23T05-54-57.990759.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_23T05_54_57.990759", "path": ["**/details_harness|gsm8k|5_2023-10-23T05-54-57.990759.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-23T05-54-57.990759.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-29-36.965037.parquet", 
"**/details_harness|hendrycksTest-management|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-29-36.965037.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-29-36.965037.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-29-36.965037.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": 
"2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-29-36.965037.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-29-36.965037.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_23T05_54_57.990759", "path": ["**/details_harness|winogrande|5_2023-10-23T05-54-57.990759.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-23T05-54-57.990759.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_29_36.965037", "path": ["results_2023-10-09T12-29-36.965037.parquet"]}, {"split": "2023_10_23T05_54_57.990759", "path": ["results_2023-10-23T05-54-57.990759.parquet"]}, {"split": "latest", "path": ["results_2023-10-23T05-54-57.990759.parquet"]}]}]}
2023-10-23T04:55:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of PulsarAI/Nebula-7B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model PulsarAI/Nebula-7B on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-23T05:54:57.990759 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of PulsarAI/Nebula-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-23T05:54:57.990759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of PulsarAI/Nebula-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-23T05:54:57.990759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 17, 31, 165, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of PulsarAI/Nebula-7B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-23T05:54:57.990759(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
a4184d8ea178ca42ed27fd08432f3e288b658e1d
# Dataset Card for "finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
promptora11/finetune
[ "region:us" ]
2023-10-09T11:33:06+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 74044, "num_examples": 100}], "download_size": 7883, "dataset_size": 74044}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-09T11:38:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "finetune" More Information needed
[ "# Dataset Card for \"finetune\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"finetune\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"finetune\"\n\nMore Information needed" ]
8afb45eed0e403e5cd22dd570463a0f61bdc2e4a
# Dataset Card for "cqudupstack-android" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-android
[ "region:us" ]
2023-10-09T11:36:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 47953, "num_examples": 699}, {"name": "corpus", "num_bytes": 12840959, "num_examples": 22998}], "download_size": 7657118, "dataset_size": 12888912}}
2023-10-09T11:36:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-android" More Information needed
[ "# Dataset Card for \"cqudupstack-android\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-android\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-android\"\n\nMore Information needed" ]
1687741fcd6cb0f2278b904d6d371543d4b6cf67
# Dataset Card for "cqadupstack-android-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-android-qrels
[ "region:us" ]
2023-10-09T11:36:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 43411, "num_examples": 1696}], "download_size": 0, "dataset_size": 43411}}
2023-10-09T11:39:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-android-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-android-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-android-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-android-qrels\"\n\nMore Information needed" ]
45434d1152ca0dbb3c9edcd6dfbe944e3a314a9a
# Dataset Card for "cqudupstack-gaming" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-gaming
[ "region:us" ]
2023-10-09T11:36:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 105494, "num_examples": 1595}, {"name": "corpus", "num_bytes": 20666596, "num_examples": 45301}], "download_size": 12946080, "dataset_size": 20772090}}
2023-10-09T11:36:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-gaming" More Information needed
[ "# Dataset Card for \"cqudupstack-gaming\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-gaming\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-gaming\"\n\nMore Information needed" ]
ed714322b557b87f1658e2320ba64149a3dea24e
# Dataset Card for "cqadupstack-gaming-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-gaming-qrels
[ "region:us" ]
2023-10-09T11:36:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 60520, "num_examples": 2263}], "download_size": 0, "dataset_size": 60520}}
2023-10-09T11:39:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-gaming-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-gaming-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-gaming-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-gaming-qrels\"\n\nMore Information needed" ]
06aeb8c749b7fa51334e9aeeaff4513689b42438
# Dataset Card for "cqudupstack-mathematica" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-mathematica
[ "region:us" ]
2023-10-09T11:36:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 52792, "num_examples": 804}, {"name": "corpus", "num_bytes": 18735825, "num_examples": 16705}], "download_size": 10393860, "dataset_size": 18788617}}
2023-10-09T11:36:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-mathematica" More Information needed
[ "# Dataset Card for \"cqudupstack-mathematica\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-mathematica\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-mathematica\"\n\nMore Information needed" ]
ce627655eef7e3bb08b9adbaf2b133c68ab8939d
# Dataset Card for "cqadupstack-mathematica-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-mathematica-qrels
[ "region:us" ]
2023-10-09T11:36:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 34691, "num_examples": 1358}], "download_size": 0, "dataset_size": 34691}}
2023-10-09T11:39:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-mathematica-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-mathematica-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-mathematica-qrels\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-mathematica-qrels\"\n\nMore Information needed" ]
fe78f094db1fb8b97c023f49e449298f89121853
# Dataset Card for "cqudupstack-programmers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-programmers
[ "region:us" ]
2023-10-09T11:37:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 63785, "num_examples": 876}, {"name": "corpus", "num_bytes": 32727262, "num_examples": 32176}], "download_size": 19360000, "dataset_size": 32791047}}
2023-10-09T11:37:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-programmers" More Information needed
[ "# Dataset Card for \"cqudupstack-programmers\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-programmers\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-programmers\"\n\nMore Information needed" ]
5cb210d71bc9b5f223d255f2fabab65d35f48b17
# Dataset Card for "cqadupstack-programmers-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-programmers-qrels
[ "region:us" ]
2023-10-09T11:37:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 45452, "num_examples": 1675}], "download_size": 0, "dataset_size": 45452}}
2023-10-09T11:40:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-programmers-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-programmers-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-programmers-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-programmers-qrels\"\n\nMore Information needed" ]
fd0b959b9ad238f5559dd90cd43f435173aa1fdb
# Dataset Card for Evaluation run of Weyaxi/Samantha-Nebula-7B ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Weyaxi/Samantha-Nebula-7B - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** clementine@huggingface.co ### Dataset Summary Dataset automatically created during the evaluation run of model [Weyaxi/Samantha-Nebula-7B](https://huggingface.co/Weyaxi/Samantha-Nebula-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_Weyaxi__Samantha-Nebula-7B",
	"harness_winogrande_5",
	split="train")
``` ## Latest results These are the [latest results from run 2023-10-24T22:52:33.668661](https://huggingface.co/datasets/open-llm-leaderboard/details_Weyaxi__Samantha-Nebula-7B/blob/main/results_2023-10-24T22-52-33.668661.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ```python
{
    "all": {
        "em": 0.3792994966442953,
        "em_stderr": 0.004969032454438954,
        "f1": 0.4256501677852355,
        "f1_stderr": 0.0048455756354128885,
        "acc": 0.42229140848972546,
        "acc_stderr": 0.010604861041151385
    },
    "harness|drop|3": {
        "em": 0.3792994966442953,
        "em_stderr": 0.004969032454438954,
        "f1": 0.4256501677852355,
        "f1_stderr": 0.0048455756354128885
    },
    "harness|gsm8k|5": {
        "acc": 0.11372251705837756,
        "acc_stderr": 0.008744810131034036
    },
    "harness|winogrande|5": {
        "acc": 0.7308602999210734,
        "acc_stderr": 0.012464911951268734
    }
}
``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_Weyaxi__Samantha-Nebula-7B
[ "region:us" ]
2023-10-09T11:37:09+00:00
{"pretty_name": "Evaluation run of Weyaxi/Samantha-Nebula-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [Weyaxi/Samantha-Nebula-7B](https://huggingface.co/Weyaxi/Samantha-Nebula-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Weyaxi__Samantha-Nebula-7B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-24T22:52:33.668661](https://huggingface.co/datasets/open-llm-leaderboard/details_Weyaxi__Samantha-Nebula-7B/blob/main/results_2023-10-24T22-52-33.668661.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.3792994966442953,\n \"em_stderr\": 0.004969032454438954,\n \"f1\": 0.4256501677852355,\n \"f1_stderr\": 0.0048455756354128885,\n \"acc\": 0.42229140848972546,\n \"acc_stderr\": 0.010604861041151385\n },\n \"harness|drop|3\": {\n \"em\": 0.3792994966442953,\n \"em_stderr\": 0.004969032454438954,\n \"f1\": 0.4256501677852355,\n \"f1_stderr\": 0.0048455756354128885\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.11372251705837756,\n \"acc_stderr\": 0.008744810131034036\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7308602999210734,\n \"acc_stderr\": 0.012464911951268734\n }\n}\n```", "repo_url": "https://huggingface.co/Weyaxi/Samantha-Nebula-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_24T22_52_33.668661", "path": ["**/details_harness|drop|3_2023-10-24T22-52-33.668661.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-24T22-52-33.668661.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_24T22_52_33.668661", "path": ["**/details_harness|gsm8k|5_2023-10-24T22-52-33.668661.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-24T22-52-33.668661.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": 
"2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-36-46.129297.parquet", 
"**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-36-46.129297.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-management|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-virology|5_2023-10-09T12-36-46.129297.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", 
"data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", 
"path": ["**/details_harness|hendrycksTest-college_physics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": 
["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-medical_genetics|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": 
"2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-36-46.129297.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-10-09T12-36-46.129297.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_24T22_52_33.668661", "path": ["**/details_harness|winogrande|5_2023-10-24T22-52-33.668661.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-24T22-52-33.668661.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_09T12_36_46.129297", "path": ["results_2023-10-09T12-36-46.129297.parquet"]}, {"split": "2023_10_24T22_52_33.668661", "path": ["results_2023-10-24T22-52-33.668661.parquet"]}, {"split": "latest", "path": ["results_2023-10-24T22-52-33.668661.parquet"]}]}]}
2023-10-24T21:52:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Weyaxi/Samantha-Nebula-7B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Weyaxi/Samantha-Nebula-7B on the Open LLM Leaderboard. The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-24T22:52:33.668661 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of Weyaxi/Samantha-Nebula-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Weyaxi/Samantha-Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-24T22:52:33.668661(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Weyaxi/Samantha-Nebula-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Weyaxi/Samantha-Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-24T22:52:33.668661(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 20, 31, 168, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Weyaxi/Samantha-Nebula-7B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Weyaxi/Samantha-Nebula-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-24T22:52:33.668661(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
6b17246592ce84c2fa5d655e5ffe69bca7f4cdd1
# Dataset Card for "cqudupstack-tex" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-tex
[ "region:us" ]
2023-10-09T11:37:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 186934, "num_examples": 2906}, {"name": "corpus", "num_bytes": 86600423, "num_examples": 68184}], "download_size": 43424126, "dataset_size": 86787357}}
2023-10-09T11:37:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-tex" More Information needed
[ "# Dataset Card for \"cqudupstack-tex\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-tex\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-tex\"\n\nMore Information needed" ]
92d6e31620708eb331bb8a65efae165fd3cc55fb
# Dataset Card for "cqadupstack-tex-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-tex-qrels
[ "region:us" ]
2023-10-09T11:37:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 137572, "num_examples": 5154}], "download_size": 0, "dataset_size": 137572}}
2023-10-09T11:40:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-tex-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-tex-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-tex-qrels\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-tex-qrels\"\n\nMore Information needed" ]
846c700eb6a6f2f8ef14097b907a58c8885ce079
# Dataset Card for "cqudupstack-webmasters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-webmasters
[ "region:us" ]
2023-10-09T11:38:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 34792, "num_examples": 506}, {"name": "corpus", "num_bytes": 11659413, "num_examples": 17405}], "download_size": 6885106, "dataset_size": 11694205}}
2023-10-09T11:38:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-webmasters" More Information needed
[ "# Dataset Card for \"cqudupstack-webmasters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-webmasters\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-webmasters\"\n\nMore Information needed" ]
ff7d8ac2bef48e525a2a413a7a27562b7d320405
# Dataset Card for "cqadupstack-webmasters-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-webmasters-qrels
[ "region:us" ]
2023-10-09T11:38:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 35771, "num_examples": 1395}], "download_size": 0, "dataset_size": 35771}}
2023-10-09T11:41:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-webmasters-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-webmasters-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-webmasters-qrels\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-webmasters-qrels\"\n\nMore Information needed" ]
d1c51f0beab12622ab8239da5058c20ae7fd2bd1
# Dataset Card for "cqudupstack-english" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-english
[ "region:us" ]
2023-10-09T11:38:14+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 103588, "num_examples": 1570}, {"name": "corpus", "num_bytes": 18199570, "num_examples": 40221}], "download_size": 11382247, "dataset_size": 18303158}}
2023-10-09T11:38:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-english" More Information needed
[ "# Dataset Card for \"cqudupstack-english\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-english\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-english\"\n\nMore Information needed" ]
eca37c77cec0e9503dadc0afe35b28a04044a500
# Dataset Card for "cqadupstack-english-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-english-qrels
[ "region:us" ]
2023-10-09T11:38:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 100171, "num_examples": 3765}], "download_size": 0, "dataset_size": 100171}}
2023-10-09T11:41:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-english-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-english-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-english-qrels\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-english-qrels\"\n\nMore Information needed" ]
fee08f65c4345c24ca0bee20ade5438f30a656bf
# Dataset Card for "cqudupstack-gis" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqudupstack-gis
[ "region:us" ]
2023-10-09T11:38:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 61244, "num_examples": 885}, {"name": "corpus", "num_bytes": 36704924, "num_examples": 37637}], "download_size": 20083359, "dataset_size": 36766168}}
2023-10-09T11:38:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqudupstack-gis" More Information needed
[ "# Dataset Card for \"cqudupstack-gis\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqudupstack-gis\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqudupstack-gis\"\n\nMore Information needed" ]
310e7633c25f2d7014d2d523d5e09efadab18e42
# Dataset Card for "cqadupstack-gis-qrels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-gis-qrels
[ "region:us" ]
2023-10-09T11:38:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "query-id", "dtype": "string"}, {"name": "corpus-id", "dtype": "string"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 28952, "num_examples": 1114}], "download_size": 0, "dataset_size": 28952}}
2023-10-09T11:41:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-gis-qrels" More Information needed
[ "# Dataset Card for \"cqadupstack-gis-qrels\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-gis-qrels\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-gis-qrels\"\n\nMore Information needed" ]
5be715411fce21b0bd025776d847a2cc9066e671
# Dataset Card for "my_final_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
harinarayan/my_final_dataset
[ "region:us" ]
2023-10-09T11:39:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 472226.0, "num_examples": 33}], "download_size": 471990, "dataset_size": 472226.0}}
2023-10-09T11:39:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "my_final_dataset" More Information needed
[ "# Dataset Card for \"my_final_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"my_final_dataset\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"my_final_dataset\"\n\nMore Information needed" ]
d708352fc1caaef42e0ef5b73a0c548e03cca23f
# Dataset Card for "cqadupstack-android" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmrau/cqadupstack-android
[ "region:us" ]
2023-10-09T11:39:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "queries", "path": "data/queries-*"}, {"split": "corpus", "path": "data/corpus-*"}]}], "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "title", "dtype": "string"}], "splits": [{"name": "queries", "num_bytes": 47953, "num_examples": 699}, {"name": "corpus", "num_bytes": 12840959, "num_examples": 22998}], "download_size": 7657118, "dataset_size": 12888912}}
2023-10-09T11:39:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cqadupstack-android" More Information needed
[ "# Dataset Card for \"cqadupstack-android\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cqadupstack-android\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cqadupstack-android\"\n\nMore Information needed" ]